Merge branch 'master'
[sfrench/cifs-2.6.git] / drivers / net / ibm_emac / ibm_emac_core.c
1 /*
2  * drivers/net/ibm_emac/ibm_emac_core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright (c) 2004, 2005 Zultys Technologies.
7  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8  *
9  * Based on original work by
10  *      Matt Porter <mporter@kernel.crashing.org>
11  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
12  *      Armin Kuster <akuster@mvista.com>
13  *      Johnnie Peters <jpeters@mvista.com>
14  *
15  * This program is free software; you can redistribute  it and/or modify it
16  * under  the terms of  the GNU General  Public License as published by the
17  * Free Software Foundation;  either version 2 of the  License, or (at your
18  * option) any later version.
19  *
20  */
21
22 #include <linux/config.h>
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/string.h>
27 #include <linux/errno.h>
28 #include <linux/interrupt.h>
29 #include <linux/delay.h>
30 #include <linux/init.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40
41 #include <asm/processor.h>
42 #include <asm/io.h>
43 #include <asm/dma.h>
44 #include <asm/uaccess.h>
45 #include <asm/ocp.h>
46
47 #include "ibm_emac_core.h"
48 #include "ibm_emac_debug.h"
49
50 /*
51  * Lack of dma_unmap_???? calls is intentional.
52  *
53  * API-correct usage requires additional support state information to be 
54  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
55  * EMAC design (e.g. TX buffer passed from network stack can be split into
56  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
57  * maintaining such information will add additional overhead.
58  * Current DMA API implementation for 4xx processors only ensures cache coherency
59  * and dma_unmap_???? routines are empty and are likely to stay this way.
60  * I decided to omit dma_unmap_??? calls because I don't want to add additional
61  * complexity just for the sake of following some abstract API, when it doesn't
 * add any real benefit to the driver. I understand that this decision may be 
63  * controversial, but I really tried to make code API-correct and efficient 
64  * at the same time and didn't come up with code I liked :(.                --ebs
65  */
66
67 #define DRV_NAME        "emac"
68 #define DRV_VERSION     "3.53"
69 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
70
71 MODULE_DESCRIPTION(DRV_DESC);
72 MODULE_AUTHOR
73     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
74 MODULE_LICENSE("GPL");
75
76 /* minimum number of free TX descriptors required to wake up TX process */
77 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
78
79 /* If packet size is less than this number, we allocate small skb and copy packet 
80  * contents into it instead of just sending original big skb up
81  */
82 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
83
84 /* Since multiple EMACs share MDIO lines in various ways, we need
85  * to avoid re-using the same PHY ID in cases where the arch didn't
86  * setup precise phy_map entries
87  */
88 static u32 busy_phy_map;
89
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
/* Switch EMAC 'idx' RX clock source (PHY RX clock workaround path).
 * IRQs are disabled around the read-modify-write because the control
 * register is shared between all EMAC instances.
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        /* CPC0_EPCTL is DCR 0xf3, one enable bit per EMAC index */
        mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}

/* Undo EMAC_RX_CLK_TX(): restore the default RX clock source for 'idx' */
static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
        unsigned long flags;
        local_irq_save(flags);

#if defined(CONFIG_405EP)
        mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
        SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

        local_irq_restore(flags);
}
#else
/* No RX clock workaround available/needed on this platform: no-ops */
#define EMAC_RX_CLK_TX(idx)             ((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)        ((void)0)
#endif
128
129 #if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
130 /* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
131  * unfortunately this is less flexible than 440EP case, because it's a global 
132  * setting for all EMACs, therefore we do this clock trick only during probe.
133  */
134 #define EMAC_CLK_INTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
135                                             SDR_READ(DCRN_SDR_MFR) | 0x08000000)
136 #define EMAC_CLK_EXTERNAL               SDR_WRITE(DCRN_SDR_MFR, \
137                                             SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
138 #else
139 #define EMAC_CLK_INTERNAL               ((void)0)
140 #define EMAC_CLK_EXTERNAL               ((void)0)
141 #endif
142
143 /* I don't want to litter system log with timeout errors 
144  * when we have brain-damaged PHY.
145  */
146 static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
147                                              const char *error)
148 {
149 #if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
150         DBG("%d: %s" NL, dev->def->index, error);
151 #else
152         if (net_ratelimit())
153                 printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
154 #endif
155 }
156
157 /* PHY polling intervals */
158 #define PHY_POLL_LINK_ON        HZ
159 #define PHY_POLL_LINK_OFF       (HZ / 5)
160
/* Ethtool statistics names, reported in this exact order.
 * Please, keep in sync with struct ibm_emac_stats/ibm_emac_error_stats:
 * the entry order and count must match those structures field-for-field.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
        "rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
        "tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
        "rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
        "rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
        "rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
        "rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
        "rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
        "rx_bad_packet", "rx_runt_packet", "rx_short_event",
        "rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
        "rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
        "tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
        "tx_bd_excessive_collisions", "tx_bd_late_collision",
        "tx_bd_multple_collisions", "tx_bd_single_collision",
        "tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
        "tx_errors"
};
179
180 static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
181 static void emac_clean_tx_ring(struct ocp_enet_private *dev);
182
183 static inline int emac_phy_supports_gige(int phy_mode)
184 {
185         return  phy_mode == PHY_MODE_GMII ||
186                 phy_mode == PHY_MODE_RGMII ||
187                 phy_mode == PHY_MODE_TBI ||
188                 phy_mode == PHY_MODE_RTBI;
189 }
190
191 static inline int emac_phy_gpcs(int phy_mode)
192 {
193         return  phy_mode == PHY_MODE_TBI ||
194                 phy_mode == PHY_MODE_RTBI;
195 }
196
/* Enable the EMAC transmitter by setting MR0[TXE].
 * IRQs are disabled around the read-modify-write of MR0 since the same
 * register is touched by the RX enable/disable and reset paths.
 */
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_enable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (!(r & EMAC_MR0_TXE))
                out_be32(&p->mr0, r | EMAC_MR0_TXE);
        local_irq_restore(flags);
}
212
/* Disable the EMAC transmitter (clear MR0[TXE]) and busy-wait for the
 * MR0[TXI] idle indication; reports (rate-limited) if it never arrives.
 */
static void emac_tx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: tx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_TXE) {
                /* bounded spin; the loop count is an empirical timeout */
                int n = 300;
                out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n)
                        --n;
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "TX disable timeout");
        }
        local_irq_restore(flags);
}
234
235 static void emac_rx_enable(struct ocp_enet_private *dev)
236 {
237         struct emac_regs *p = dev->emacp;
238         unsigned long flags;
239         u32 r;
240
241         local_irq_save(flags);
242         if (unlikely(dev->commac.rx_stopped))
243                 goto out;
244
245         DBG("%d: rx_enable" NL, dev->def->index);
246
247         r = in_be32(&p->mr0);
248         if (!(r & EMAC_MR0_RXE)) {
249                 if (unlikely(!(r & EMAC_MR0_RXI))) {
250                         /* Wait if previous async disable is still in progress */
251                         int n = 100;
252                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
253                                 --n;
254                         if (unlikely(!n))
255                                 emac_report_timeout_error(dev,
256                                                           "RX disable timeout");
257                 }
258                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
259         }
260       out:
261         local_irq_restore(flags);
262 }
263
/* Disable the EMAC receiver (clear MR0[RXE]) and busy-wait for the
 * MR0[RXI] idle indication before returning.
 */
static void emac_rx_disable(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE) {
                /* bounded spin; the loop count is an empirical timeout */
                int n = 300;
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
                while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n)
                        --n;
                if (unlikely(!n))
                        emac_report_timeout_error(dev, "RX disable timeout");
        }
        local_irq_restore(flags);
}
285
/* Asynchronous variant of emac_rx_disable(): clears MR0[RXE] but does
 * NOT wait for MR0[RXI]; emac_rx_enable() handles the still-in-progress
 * case later.
 */
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        u32 r;

        local_irq_save(flags);

        DBG("%d: rx_disable_async" NL, dev->def->index);

        r = in_be32(&p->mr0);
        if (r & EMAC_MR0_RXE)
                out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
        local_irq_restore(flags);
}
301
/* Soft-reset the EMAC core via MR0[SRST].
 * Returns 0 on success, -ETIMEDOUT if the reset bit never self-clears.
 * Remembers a failed reset in dev->reset_failed so the next attempt
 * skips the (now pointless) RX/TX channel stop.
 */
static int emac_reset(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        unsigned long flags;
        int n = 20;

        DBG("%d: reset" NL, dev->def->index);

        local_irq_save(flags);

        if (!dev->reset_failed) {
                /* 40x erratum suggests stopping RX channel before reset,
                 * we stop TX as well
                 */
                emac_rx_disable(dev);
                emac_tx_disable(dev);
        }

        /* SRST self-clears when the reset completes */
        out_be32(&p->mr0, EMAC_MR0_SRST);
        while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
                --n;
        local_irq_restore(flags);

        if (n) {
                dev->reset_failed = 0;
                return 0;
        } else {
                emac_report_timeout_error(dev, "reset timeout");
                dev->reset_failed = 1;
                return -ETIMEDOUT;
        }
}
334
/* Program the four 16-bit Group Address Hash Tables (GAHT1..GAHT4) from
 * the device's multicast list: each address hashes (via the top 6 bits
 * of its Ethernet CRC) to one of 64 bits spread across the four tables.
 */
static void emac_hash_mc(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        u16 gaht[4] = { 0 };
        struct dev_mc_list *dmi;

        DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

        for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
                int bit;
                DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
                     dev->def->index,
                     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
                     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

                /* bit index 0..63; high table bit first within each word */
                bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
                gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
        }
        out_be32(&p->gaht1, gaht[0]);
        out_be32(&p->gaht2, gaht[1]);
        out_be32(&p->gaht3, gaht[2]);
        out_be32(&p->gaht4, gaht[3]);
}
358
359 static inline u32 emac_iff2rmr(struct net_device *ndev)
360 {
361         u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
362             EMAC_RMR_BASE;
363
364         if (ndev->flags & IFF_PROMISC)
365                 r |= EMAC_RMR_PME;
366         else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
367                 r |= EMAC_RMR_PMME;
368         else if (ndev->mc_count > 0)
369                 r |= EMAC_RMR_MAE;
370
371         return r;
372 }
373
374 static inline int emac_opb_mhz(void)
375 {
376         return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
377 }
378
/* BHs disabled */
/* Full (re)configuration of the EMAC after a reset: mode register (speed,
 * duplex, FIFO sizes, flow control), MAC address, VLAN TPID, RX mode,
 * FIFO thresholds, PAUSE watermarks/timer and interrupt sources.
 * Returns 0 on success, -ETIMEDOUT if the chip reset fails.
 */
static int emac_configure(struct ocp_enet_private *dev)
{
        struct emac_regs *p = dev->emacp;
        struct net_device *ndev = dev->ndev;
        int gige;
        u32 r;

        DBG("%d: configure" NL, dev->def->index);

        if (emac_reset(dev) < 0)
                return -ETIMEDOUT;

        tah_reset(dev->tah_dev);

        /* Mode register */
        r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
        if (dev->phy.duplex == DUPLEX_FULL)
                r |= EMAC_MR1_FDE;
        switch (dev->phy.speed) {
        case SPEED_1000:
                if (emac_phy_gpcs(dev->phy.mode)) {
                        r |= EMAC_MR1_MF_1000GPCS |
                            EMAC_MR1_MF_IPPA(dev->phy.address);

                        /* Put some arbitrary OUI, Manuf & Rev IDs so we can
                         * identify this GPCS PHY later.
                         */
                        out_be32(&p->ipcr, 0xdeadbeef);
                } else
                        r |= EMAC_MR1_MF_1000;
                r |= EMAC_MR1_RFS_16K;  /* large RX FIFO for gigabit */
                gige = 1;
                
                if (dev->ndev->mtu > ETH_DATA_LEN)
                        r |= EMAC_MR1_JPSM;     /* jumbo frames */
                break;
        case SPEED_100:
                r |= EMAC_MR1_MF_100;
                /* Fall through */
        default:
                r |= EMAC_MR1_RFS_4K;
                gige = 0;
                break;
        }

        /* Propagate the negotiated speed to the attached bridge device */
        if (dev->rgmii_dev)
                rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
                                dev->phy.speed);
        else
                zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
        /* on 40x erratum forces us to NOT use integrated flow control, 
         * let's hope it works on 44x ;)
         */
        if (dev->phy.duplex == DUPLEX_FULL) {
                if (dev->phy.pause)
                        r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
                else if (dev->phy.asym_pause)
                        r |= EMAC_MR1_APP;
        }
#endif
        out_be32(&p->mr1, r);

        /* Set individual MAC address */
        out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
        out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
                 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
                 ndev->dev_addr[5]);

        /* VLAN Tag Protocol ID */
        out_be32(&p->vtpid, 0x8100);

        /* Receive mode register */
        r = emac_iff2rmr(ndev);
        if (r & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, r);

        /* FIFOs thresholds */
        r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
                      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->tmr1, r);
        out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

        /* PAUSE frame is sent when RX FIFO reaches its high-water mark,
           there should be still enough space in FIFO to allow the our link
           partner time to process this frame and also time to send PAUSE 
           frame itself.

           Here is the worst case scenario for the RX FIFO "headroom"
           (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

           1) One maximum-length frame on TX                    1522 bytes
           2) One PAUSE frame time                                64 bytes
           3) PAUSE frame decode time allowance                   64 bytes
           4) One maximum-length frame on RX                    1522 bytes
           5) Round-trip propagation delay of the link (100Mb)    15 bytes
           ----------       
           3187 bytes

           I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
           low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
         */
        r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
                      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
        out_be32(&p->rwmr, r);

        /* Set PAUSE timer to the maximum */
        out_be32(&p->ptr, 0xffff);

        /* IRQ sources */
        out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
                 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
                 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
                 EMAC_ISR_IRE | EMAC_ISR_TE);
                 
        /* We need to take GPCS PHY out of isolate mode after EMAC reset */
        if (emac_phy_gpcs(dev->phy.mode)) 
                mii_reset_phy(&dev->phy);
                 
        return 0;
}
503
504 /* BHs disabled */
505 static void emac_reinitialize(struct ocp_enet_private *dev)
506 {
507         DBG("%d: reinitialize" NL, dev->def->index);
508
509         if (!emac_configure(dev)) {
510                 emac_tx_enable(dev);
511                 emac_rx_enable(dev);
512         }
513 }
514
/* BHs disabled */
/* Full TX path recovery: stop TX and its MAL channel, drop everything
 * queued in the TX ring, reconfigure the chip, then bring TX/RX back up
 * and wake the queue.
 */
static void emac_full_tx_reset(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: full_tx_reset" NL, dev->def->index);

        emac_tx_disable(dev);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_clean_tx_ring(dev);
        /* ring is empty now, reset all TX ring bookkeeping */
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

        emac_configure(dev);

        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);

        netif_wake_queue(ndev);
}
536
/* Low-level MDIO read of PHY 'id' register 'reg' through this EMAC's
 * STA control register. Returns the 16-bit register value, -EREMOTEIO
 * if the PHY flagged an error, or -ETIMEDOUT if the management
 * interface never went idle / never completed.
 * Caller must serialize MDIO access (see emac_mdio_read()).
 */
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
        struct emac_regs *p = dev->emacp;
        u32 r;
        int n;

        DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to become idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue read command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
                 (reg & EMAC_STACR_PRA_MASK)
                 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
                 | EMAC_STACR_START);

        /* Wait for read to complete */
        n = 100;
        while (!emac_phy_done(r = in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        if (unlikely(r & EMAC_STACR_PHYE)) {
                DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
                    id, reg);
                return -EREMOTEIO;
        }

        /* Extract the 16-bit data field from STACR */
        r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
        DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
        return r;
      to:
        DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
        return -ETIMEDOUT;
}
584
/* Low-level MDIO write of 'val' to PHY 'id' register 'reg'.
 * Timeouts are only logged (no error is returned to the caller).
 * Caller must serialize MDIO access (see emac_mdio_write()).
 */
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
                              u16 val)
{
        struct emac_regs *p = dev->emacp;
        int n;

        DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
             val);

        /* Enable proper MDIO port */
        zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

        /* Wait for management interface to be idle */
        n = 10;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }

        /* Issue write command */
        out_be32(&p->stacr,
                 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
                 (reg & EMAC_STACR_PRA_MASK) |
                 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
                 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

        /* Wait for write to complete */
        n = 100;
        while (!emac_phy_done(in_be32(&p->stacr))) {
                udelay(1);
                if (!--n)
                        goto to;
        }
        return;
      to:
        DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
623
624 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
625 {
626         struct ocp_enet_private *dev = ndev->priv;
627         int res;
628
629         local_bh_disable();
630         res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
631                                (u8) reg);
632         local_bh_enable();
633         return res;
634 }
635
636 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
637 {
638         struct ocp_enet_private *dev = ndev->priv;
639
640         local_bh_disable();
641         __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
642                           (u8) reg, (u16) val);
643         local_bh_enable();
644 }
645
/* BHs disabled */
/* net_device set_multicast_list hook: update RMR (and the multicast
 * hash tables if needed) while only the RX channel is briefly stopped.
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct emac_regs *p = dev->emacp;
        u32 rmr = emac_iff2rmr(ndev);

        DBG("%d: multicast %08x" NL, dev->def->index, rmr);
        BUG_ON(!netif_running(dev->ndev));

        /* I decided to relax register access rules here to avoid
         * full EMAC reset.
         *
         * There is a real problem with EMAC4 core if we use MWSW_001 bit 
         * in MR1 register and do a full EMAC reset.
         * One TX BD status update is delayed and, after EMAC reset, it 
         * never happens, resulting in TX hung (it'll be recovered by TX 
         * timeout handler eventually, but this is just gross).
         * So we either have to do full TX reset or try to cheat here :)
         *
         * The only required change is to RX mode register, so I *think* all
         * we need is just to stop RX channel. This seems to work on all
         * tested SoCs.                                                --ebs
         */
        emac_rx_disable(dev);
        if (rmr & EMAC_RMR_MAE)
                emac_hash_mc(dev);
        out_be32(&p->rmr, rmr);
        emac_rx_enable(dev);
}
676
/* BHs disabled */
/* Resize the RX ring for a new MTU: drop in-flight packets, reallocate
 * bigger skbs only when required, and toggle the MR1 jumbo bit (via a
 * full TX reset) when crossing the ETH_DATA_LEN boundary.
 * Returns 0 on success or -ENOMEM; on -ENOMEM the ring is left in a
 * consistent (partially reallocated but fully owned) state and RX is
 * restarted anyway.
 */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int rx_sync_size = emac_rx_sync_size(new_mtu);
        int rx_skb_size = emac_rx_skb_size(new_mtu);
        int i, ret = 0;

        emac_rx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

        /* Drop any partially received scatter-gather packet */
        if (dev->rx_sg_skb) {
                ++dev->estats.rx_dropped_resize;
                dev_kfree_skb(dev->rx_sg_skb);
                dev->rx_sg_skb = NULL;
        }

        /* Make a first pass over RX ring and mark BDs ready, dropping 
         * non-processed packets on the way. We need this as a separate pass
         * to simplify error recovery in the case of allocation failure later.
         */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
                        ++dev->estats.rx_dropped_resize;

                dev->rx_desc[i].data_len = 0;
                dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
                    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
        }

        /* Reallocate RX ring only if bigger skb buffers are required */
        if (rx_skb_size <= dev->rx_skb_size)
                goto skip;

        /* Second pass, allocate new skbs */
        for (i = 0; i < NUM_RX_BUFF; ++i) {
                struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
                if (!skb) {
                        ret = -ENOMEM;
                        goto oom;
                }

                BUG_ON(!dev->rx_skb[i]);
                dev_kfree_skb(dev->rx_skb[i]);

                /* +2/-2 dance keeps the IP header alignment used by the
                 * RX path; see emac_alloc_rx_skb() */
                skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
                dev->rx_desc[i].data_ptr =
                    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
                                   DMA_FROM_DEVICE) + 2;
                dev->rx_skb[i] = skb;
        }
      skip:
        /* Check if we need to change "Jumbo" bit in MR1 */
        if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
                /* This is to prevent starting RX channel in emac_rx_enable() */
                dev->commac.rx_stopped = 1;

                dev->ndev->mtu = new_mtu;
                emac_full_tx_reset(dev->ndev);
        }

        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
        /* Restart RX */
        dev->commac.rx_stopped = dev->rx_slot = 0;
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_rx_enable(dev);

        return ret;
}
747
/* Process ctx, rtnl_lock semaphore */
/* net_device change_mtu hook: validate the new MTU, resize the RX ring
 * if the interface is running and the skb size actually changes, then
 * record the new MTU and derived RX buffer sizes.
 * Returns 0 or a negative errno from emac_resize_rx_ring().
 */
static int emac_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct ocp_enet_private *dev = ndev->priv;
        int ret = 0;

        if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
                return -EINVAL;

        DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);

        local_bh_disable();
        if (netif_running(ndev)) {
                /* Check if we really need to reinitalize RX ring */
                if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
                        ret = emac_resize_rx_ring(dev, new_mtu);
        }

        if (!ret) {
                ndev->mtu = new_mtu;
                dev->rx_skb_size = emac_rx_skb_size(new_mtu);
                dev->rx_sync_size = emac_rx_sync_size(new_mtu);
        }       
        local_bh_enable();

        return ret;
}
775
776 static void emac_clean_tx_ring(struct ocp_enet_private *dev)
777 {
778         int i;
779         for (i = 0; i < NUM_TX_BUFF; ++i) {
780                 if (dev->tx_skb[i]) {
781                         dev_kfree_skb(dev->tx_skb[i]);
782                         dev->tx_skb[i] = NULL;
783                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
784                                 ++dev->estats.tx_dropped;
785                 }
786                 dev->tx_desc[i].ctrl = 0;
787                 dev->tx_desc[i].data_ptr = 0;
788         }
789 }
790
791 static void emac_clean_rx_ring(struct ocp_enet_private *dev)
792 {
793         int i;
794         for (i = 0; i < NUM_RX_BUFF; ++i)
795                 if (dev->rx_skb[i]) {
796                         dev->rx_desc[i].ctrl = 0;
797                         dev_kfree_skb(dev->rx_skb[i]);
798                         dev->rx_skb[i] = NULL;
799                         dev->rx_desc[i].data_ptr = 0;
800                 }
801
802         if (dev->rx_sg_skb) {
803                 dev_kfree_skb(dev->rx_sg_skb);
804                 dev->rx_sg_skb = NULL;
805         }
806 }
807
/* Allocate and DMA-map a fresh RX skb for ring position 'slot'.
 * Returns 0 or -ENOMEM. The skb data is reserved at HEADROOM + 2 and the
 * buffer is mapped starting at data - 2 with +2 added back to data_ptr;
 * the net effect is the mapping covers rx_sync_size from the reserved
 * point while data_ptr keeps the 2-byte offset (presumably for IP header
 * alignment — matches the same pattern in emac_resize_rx_ring()).
 */
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
                                    int flags)
{
        struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
        if (unlikely(!skb))
                return -ENOMEM;

        dev->rx_skb[slot] = skb;
        dev->rx_desc[slot].data_len = 0;

        skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
        dev->rx_desc[slot].data_ptr = 
            dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size, 
                           DMA_FROM_DEVICE) + 2;
        /* make sure data_ptr is visible before marking the BD empty */
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

        return 0;
}
828
829 static void emac_print_link_status(struct ocp_enet_private *dev)
830 {
831         if (netif_carrier_ok(dev->ndev))
832                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
833                        dev->ndev->name, dev->phy.speed,
834                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
835                        dev->phy.pause ? ", pause enabled" :
836                        dev->phy.asym_pause ? ", assymetric pause enabled" : "");
837         else
838                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
839 }
840
/* Process ctx, rtnl_lock semaphore */
/* net_device open() hook: grab the error IRQ, fill the RX ring,
 * sample the PHY link state, then enable the MAL channels and MAC.
 * Returns 0 on success or a negative errno.
 */
static int emac_open(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;
        int err, i;

        DBG("%d: open" NL, dev->def->index);

        /* Setup error IRQ handler */
        err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
        if (err) {
                printk(KERN_ERR "%s: failed to request IRQ %d\n",
                       ndev->name, dev->def->irq);
                return err;
        }

        /* Allocate RX ring */
        for (i = 0; i < NUM_RX_BUFF; ++i)
                if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
                        printk(KERN_ERR "%s: failed to allocate RX ring\n",
                               ndev->name);
                        goto oom;
                }

        /* BHs off while ring state is reset and MAL/EMAC are enabled */
        local_bh_disable();
        dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
            dev->commac.rx_stopped = 0;
        dev->rx_sg_skb = NULL;

        if (dev->phy.address >= 0) {
                /* External PHY: sample current link state and start the
                 * periodic link poll timer */
                int link_poll_interval;
                if (dev->phy.def->ops->poll_link(&dev->phy)) {
                        dev->phy.def->ops->read_link(&dev->phy);
                        EMAC_RX_CLK_DEFAULT(dev->def->index);
                        netif_carrier_on(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_ON;
                } else {
                        EMAC_RX_CLK_TX(dev->def->index);
                        netif_carrier_off(dev->ndev);
                        link_poll_interval = PHY_POLL_LINK_OFF;
                }
                mod_timer(&dev->link_timer, jiffies + link_poll_interval);
                emac_print_link_status(dev);
        } else
                /* No PHY to poll: treat the link as always up */
                netif_carrier_on(dev->ndev);

        /* Program the MAC, hook into MAL polling, enable both channels */
        emac_configure(dev);
        mal_poll_add(dev->mal, &dev->commac);
        mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
        mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        emac_tx_enable(dev);
        emac_rx_enable(dev);
        netif_start_queue(ndev);
        local_bh_enable();

        return 0;
      oom:
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);
        return -ENOMEM;
}
904
905 /* BHs disabled */
906 static int emac_link_differs(struct ocp_enet_private *dev)
907 {
908         u32 r = in_be32(&dev->emacp->mr1);
909
910         int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
911         int speed, pause, asym_pause;
912
913         if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
914                 speed = SPEED_1000;
915         else if (r & EMAC_MR1_MF_100)
916                 speed = SPEED_100;
917         else
918                 speed = SPEED_10;
919
920         switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
921         case (EMAC_MR1_EIFC | EMAC_MR1_APP):
922                 pause = 1;
923                 asym_pause = 0;
924                 break;
925         case EMAC_MR1_APP:
926                 pause = 0;
927                 asym_pause = 1;
928                 break;
929         default:
930                 pause = asym_pause = 0;
931         }
932         return speed != dev->phy.speed || duplex != dev->phy.duplex ||
933             pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
934 }
935
/* BHs disabled */
/* Periodic link poll timer.  Detects link up/down transitions,
 * re-reads PHY parameters on link-up (resetting the chip when they
 * changed), and re-arms itself with an interval that depends on the
 * current link state.
 */
static void emac_link_timer(unsigned long data)
{
        struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
        int link_poll_interval;

        DBG2("%d: link timer" NL, dev->def->index);

        if (dev->phy.def->ops->poll_link(&dev->phy)) {
                if (!netif_carrier_ok(dev->ndev)) {
                        /* Link just came up */
                        EMAC_RX_CLK_DEFAULT(dev->def->index);

                        /* Get new link parameters */
                        dev->phy.def->ops->read_link(&dev->phy);

                        /* Reset the chip when parameters changed, or
                         * unconditionally when a TAH is attached */
                        if (dev->tah_dev || emac_link_differs(dev))
                                emac_full_tx_reset(dev->ndev);

                        netif_carrier_on(dev->ndev);
                        emac_print_link_status(dev);
                }
                link_poll_interval = PHY_POLL_LINK_ON;
        } else {
                if (netif_carrier_ok(dev->ndev)) {
                        /* Link just went down */
                        EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
                        emac_reinitialize(dev);
#endif
                        netif_carrier_off(dev->ndev);
                        emac_print_link_status(dev);
                }

                /* Retry reset if the previous attempt failed.
                 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
                 * case, but I left it here because it shouldn't trigger for
                 * sane PHYs anyway.
                 */
                if (unlikely(dev->reset_failed))
                        emac_reinitialize(dev);

                link_poll_interval = PHY_POLL_LINK_OFF;
        }
        /* Re-arm for the next poll */
        mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
980
/* BHs disabled */
/* Force a quick re-evaluation of the link: drop carrier and, if the
 * poll timer is armed, pull it in to the short "link down" interval.
 */
static void emac_force_link_update(struct ocp_enet_private *dev)
{
        netif_carrier_off(dev->ndev);
        if (timer_pending(&dev->link_timer))
                mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
}
988
/* Process ctx, rtnl_lock semaphore */
/* net_device stop() hook: stop link polling, quiesce the MAC and MAL
 * channels, then free the rings and the error IRQ.  Always returns 0.
 */
static int emac_close(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ocp_func_emac_data *emacdata = dev->def->additions;

        DBG("%d: close" NL, dev->def->index);

        local_bh_disable();

        /* Stop the link timer first so it can't touch the hardware
         * while we are tearing things down */
        if (dev->phy.address >= 0)
                del_timer_sync(&dev->link_timer);

        netif_stop_queue(ndev);
        emac_rx_disable(dev);
        emac_tx_disable(dev);
        mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
        mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
        mal_poll_del(dev->mal, &dev->commac);
        local_bh_enable();

        /* Channels are quiesced: safe to release buffers and the IRQ */
        emac_clean_tx_ring(dev);
        emac_clean_rx_ring(dev);
        free_irq(dev->def->irq, dev);

        return 0;
}
1016
/* Return the TX descriptor flag requesting hardware checksumming:
 * EMAC_TX_CTRL_TAH_CSUM when the stack asked for HW checksum and the
 * driver was built with TAH support, 0 otherwise.  Also counts the
 * offloaded packet in the driver stats.
 */
static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
                               struct sk_buff *skb)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (skb->ip_summed == CHECKSUM_HW) {
                ++dev->stats.tx_packets_csum;
                return EMAC_TX_CTRL_TAH_CSUM;
        }
#endif
        return 0;
}
1028
/* Common tail of the xmit paths: kick the transmitter, account the
 * packet, and stop the queue when the TX ring has just become full
 * (emac_poll_tx() wakes it again).  Always returns 0 (accepted).
 */
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
        struct emac_regs *p = dev->emacp;
        struct net_device *ndev = dev->ndev;

        /* Send the packet out */
        out_be32(&p->tmr0, EMAC_TMR0_XMIT);

        if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
                netif_stop_queue(ndev);
                DBG2("%d: stopped TX queue" NL, dev->def->index);
        }

        ndev->trans_start = jiffies;
        ++dev->stats.tx_packets;
        dev->stats.tx_bytes += len;

        return 0;
}
1048
/* BHs disabled */
/* hard_start_xmit for the non-SG case: the whole skb goes out in a
 * single buffer descriptor.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        unsigned int len = skb->len;
        int slot;

        u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

        slot = dev->tx_slot++;
        if (dev->tx_slot == NUM_TX_BUFF) {
                dev->tx_slot = 0;
                ctrl |= MAL_TX_CTRL_WRAP;
        }

        DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

        dev->tx_skb[slot] = skb;
        dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
                                                     DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) len;
        /* data_ptr/data_len must be in place before READY is set */
        barrier();
        dev->tx_desc[slot].ctrl = ctrl;

        return emac_xmit_finish(dev, len);
}
1076
1077 #if defined(CONFIG_IBM_EMAC_TAH)
/* Fill the TX slots following @slot with up-to-MAL_MAX_TX_SIZE chunks
 * of the DMA buffer at @pd (@len bytes total).  @last is non-zero for
 * the frame's final buffer, so MAL_TX_CTRL_LAST is set on its last
 * chunk.  Returns the index of the last slot written.  The caller has
 * already verified enough slots are free.
 */
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
                                  u32 pd, int len, int last, u16 base_ctrl)
{
        while (1) {
                u16 ctrl = base_ctrl;
                int chunk = min(len, MAL_MAX_TX_SIZE);
                len -= chunk;

                slot = (slot + 1) % NUM_TX_BUFF;

                if (last && !len)
                        ctrl |= MAL_TX_CTRL_LAST;
                if (slot == NUM_TX_BUFF - 1)
                        ctrl |= MAL_TX_CTRL_WRAP;

                dev->tx_skb[slot] = NULL;
                dev->tx_desc[slot].data_ptr = pd;
                dev->tx_desc[slot].data_len = (u16) chunk;
                dev->tx_desc[slot].ctrl = ctrl;
                ++dev->tx_cnt;

                if (!len)
                        break;

                pd += chunk;
        }
        return slot;
}
1106
/* BHs disabled (SG version for TAH equipped EMACs) */
/* Scatter/gather hard_start_xmit: splits the linear part and each
 * page fragment into MAL_MAX_TX_SIZE chunks spread over consecutive
 * BDs.  Returns 0 when the packet was queued, 1 when the ring was too
 * full (queue is stopped and the stack will retry).
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        int nr_frags = skb_shinfo(skb)->nr_frags;
        int len = skb->len, chunk;
        int slot, i;
        u16 ctrl;
        u32 pd;

        /* This is common "fast" path */
        if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
                return emac_start_xmit(skb, ndev);

        /* From here on, len is the linear part only */
        len -= skb->data_len;

        /* Note, this is only an *estimation*, we can still run out of empty
         * slots because of the additional fragmentation into
         * MAL_MAX_TX_SIZE-sized chunks
         */
        if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
                goto stop_queue;

        /* No MAL_TX_CTRL_LAST here: it is applied to the frame's final
         * chunk by emac_xmit_split() */
        ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
            emac_tx_csum(dev, skb);
        slot = dev->tx_slot;

        /* skb data */
        dev->tx_skb[slot] = NULL;
        chunk = min(len, MAL_MAX_TX_SIZE);
        dev->tx_desc[slot].data_ptr = pd =
            dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
        dev->tx_desc[slot].data_len = (u16) chunk;
        len -= chunk;
        if (unlikely(len))
                slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
                                       ctrl);
        /* skb fragments */
        for (i = 0; i < nr_frags; ++i) {
                struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
                len = frag->size;

                /* Re-check remaining ring space for every fragment */
                if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
                        goto undo_frame;

                pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
                                  DMA_TO_DEVICE);

                slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
                                       ctrl);
        }

        DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
             dev->tx_slot, slot);

        /* Attach skb to the last slot so we don't release it too early */
        dev->tx_skb[slot] = skb;

        /* Send the packet out */
        if (dev->tx_slot == NUM_TX_BUFF - 1)
                ctrl |= MAL_TX_CTRL_WRAP;
        /* Set READY on the first descriptor last, after the whole
         * chain has been written out */
        barrier();
        dev->tx_desc[dev->tx_slot].ctrl = ctrl;
        dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

        return emac_xmit_finish(dev, skb->len);

      undo_frame:
        /* Well, too bad. Our previous estimation was overly optimistic. 
         * Undo everything.
         */
        while (slot != dev->tx_slot) {
                dev->tx_desc[slot].ctrl = 0;
                --dev->tx_cnt;
                if (--slot < 0)
                        slot = NUM_TX_BUFF - 1;
        }
        ++dev->estats.tx_undo;

      stop_queue:
        netif_stop_queue(ndev);
        DBG2("%d: stopped TX queue" NL, dev->def->index);
        return 1;
}
1191 #else
1192 # define emac_start_xmit_sg     emac_start_xmit
1193 #endif  /* !defined(CONFIG_IBM_EMAC_TAH) */
1194
1195 /* BHs disabled */
1196 static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
1197 {
1198         struct ibm_emac_error_stats *st = &dev->estats;
1199         DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);
1200
1201         ++st->tx_bd_errors;
1202         if (ctrl & EMAC_TX_ST_BFCS)
1203                 ++st->tx_bd_bad_fcs;
1204         if (ctrl & EMAC_TX_ST_LCS)
1205                 ++st->tx_bd_carrier_loss;
1206         if (ctrl & EMAC_TX_ST_ED)
1207                 ++st->tx_bd_excessive_deferral;
1208         if (ctrl & EMAC_TX_ST_EC)
1209                 ++st->tx_bd_excessive_collisions;
1210         if (ctrl & EMAC_TX_ST_LC)
1211                 ++st->tx_bd_late_collision;
1212         if (ctrl & EMAC_TX_ST_MC)
1213                 ++st->tx_bd_multple_collisions;
1214         if (ctrl & EMAC_TX_ST_SC)
1215                 ++st->tx_bd_single_collision;
1216         if (ctrl & EMAC_TX_ST_UR)
1217                 ++st->tx_bd_underrun;
1218         if (ctrl & EMAC_TX_ST_SQE)
1219                 ++st->tx_bd_sqe;
1220 }
1221
/* MAL poll callback: reclaim TX descriptors the hardware has finished
 * with -- free their skbs, record BD errors, and wake the queue once
 * it drops below the wakeup threshold.
 */
static void emac_poll_tx(void *param)
{
        struct ocp_enet_private *dev = param;
        DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
             dev->ack_slot);

        if (dev->tx_cnt) {
                u16 ctrl;
                int slot = dev->ack_slot, n = 0;
              again:
                ctrl = dev->tx_desc[slot].ctrl;
                if (!(ctrl & MAL_TX_CTRL_READY)) {
                        /* Hardware cleared READY: descriptor is done */
                        struct sk_buff *skb = dev->tx_skb[slot];
                        ++n;

                        /* tx_skb[slot] is only set on the frame's last
                         * BD (see emac_start_xmit_sg), hence the check */
                        if (skb) {
                                dev_kfree_skb(skb);
                                dev->tx_skb[slot] = NULL;
                        }
                        slot = (slot + 1) % NUM_TX_BUFF;

                        if (unlikely(EMAC_IS_BAD_TX(ctrl)))
                                emac_parse_tx_error(dev, ctrl);

                        if (--dev->tx_cnt)
                                goto again;
                }
                if (n) {
                        dev->ack_slot = slot;
                        if (netif_queue_stopped(dev->ndev) &&
                            dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
                                netif_wake_queue(dev->ndev);

                        DBG2("%d: tx %d pkts" NL, dev->def->index, n);
                }
        }
}
1259
/* Hand an RX skb back to the hardware without reallocating it.
 * When @len is non-zero the CPU read the buffer, so it is re-mapped
 * over len + the 2-byte alignment pad (starting at skb->data - 2,
 * matching emac_alloc_rx_skb) before the descriptor goes EMPTY again.
 */
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
                                       int len)
{
        struct sk_buff *skb = dev->rx_skb[slot];
        DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

        if (len) 
                dma_map_single(dev->ldev, skb->data - 2, 
                               EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

        dev->rx_desc[slot].data_len = 0;
        /* data_len must be cleared before EMPTY is set */
        barrier();
        dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
            (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1275
1276 static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
1277 {
1278         struct ibm_emac_error_stats *st = &dev->estats;
1279         DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);
1280
1281         ++st->rx_bd_errors;
1282         if (ctrl & EMAC_RX_ST_OE)
1283                 ++st->rx_bd_overrun;
1284         if (ctrl & EMAC_RX_ST_BP)
1285                 ++st->rx_bd_bad_packet;
1286         if (ctrl & EMAC_RX_ST_RP)
1287                 ++st->rx_bd_runt_packet;
1288         if (ctrl & EMAC_RX_ST_SE)
1289                 ++st->rx_bd_short_event;
1290         if (ctrl & EMAC_RX_ST_AE)
1291                 ++st->rx_bd_alignment_error;
1292         if (ctrl & EMAC_RX_ST_BFCS)
1293                 ++st->rx_bd_bad_fcs;
1294         if (ctrl & EMAC_RX_ST_PTL)
1295                 ++st->rx_bd_packet_too_long;
1296         if (ctrl & EMAC_RX_ST_ORE)
1297                 ++st->rx_bd_out_of_range;
1298         if (ctrl & EMAC_RX_ST_IRE)
1299                 ++st->rx_bd_in_range;
1300 }
1301
/* Mark the skb checksum-verified when the (already masked) BD status
 * shows no errors and a TAH device is present.  No-op when built
 * without CONFIG_IBM_EMAC_TAH.
 */
static inline void emac_rx_csum(struct ocp_enet_private *dev,
                                struct sk_buff *skb, u16 ctrl)
{
#if defined(CONFIG_IBM_EMAC_TAH)
        if (!ctrl && dev->tah_dev) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                ++dev->stats.rx_packets_csum;
        }
#endif
}
1312
/* Append the data from RX slot @slot to the pending scatter/gather
 * packet (dev->rx_sg_skb).  Returns 0 on success, -1 when there is no
 * pending packet or the result would exceed rx_skb_size (the partial
 * packet is dropped).  The slot is recycled in every case.
 */
static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
{
        if (likely(dev->rx_sg_skb != NULL)) {
                int len = dev->rx_desc[slot].data_len;
                int tot_len = dev->rx_sg_skb->len + len;

                if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
                        /* Assembled packet would overflow the skb */
                        ++dev->estats.rx_dropped_mtu;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                } else {
                        cacheable_memcpy(dev->rx_sg_skb->tail,
                                         dev->rx_skb[slot]->data, len);
                        skb_put(dev->rx_sg_skb, len);
                        emac_recycle_rx_skb(dev, slot, len);
                        return 0;
                }
        }
        emac_recycle_rx_skb(dev, slot, 0);
        return -1;
}
1334
/* BHs disabled */
/* MAL poll callback: process up to @budget completed RX descriptors.
 * Single-descriptor packets take the fast path; multi-descriptor
 * (scatter/gather) packets are assembled via dev->rx_sg_skb.  Also
 * restarts the RX channel if it was stopped by a descriptor error.
 * Returns the number of descriptors consumed.
 */
static int emac_poll_rx(void *param, int budget)
{
        struct ocp_enet_private *dev = param;
        int slot = dev->rx_slot, received = 0;

        DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
        while (budget > 0) {
                int len;
                struct sk_buff *skb;
                u16 ctrl = dev->rx_desc[slot].ctrl;

                if (ctrl & MAL_RX_CTRL_EMPTY)
                        break;

                skb = dev->rx_skb[slot];
                /* compiler barrier: read data_len only after ctrl
                 * showed the descriptor as complete */
                barrier();
                len = dev->rx_desc[slot].data_len;

                if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
                        goto sg;

                ctrl &= EMAC_BAD_RX_MASK;
                if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                        /* Bad packet: count, recycle and move on */
                        emac_parse_rx_error(dev, ctrl);
                        ++dev->estats.rx_dropped_error;
                        emac_recycle_rx_skb(dev, slot, 0);
                        len = 0;
                        goto next;
                }

                if (len && len < EMAC_RX_COPY_THRESH) {
                        /* Small packet: copy into a fresh skb and keep
                         * the ring buffer in place */
                        struct sk_buff *copy_skb =
                            alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
                        if (unlikely(!copy_skb))
                                goto oom;

                        skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
                        cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
                                         len + 2);
                        emac_recycle_rx_skb(dev, slot, len);
                        skb = copy_skb;
                } else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
                        goto oom;

                skb_put(skb, len);
              push_packet:
                skb->dev = dev->ndev;
                skb->protocol = eth_type_trans(skb, dev->ndev);
                emac_rx_csum(dev, skb, ctrl);

                if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
                        ++dev->estats.rx_dropped_stack;
              next:
                ++dev->stats.rx_packets;
              skip:
                dev->stats.rx_bytes += len;
                slot = (slot + 1) % NUM_RX_BUFF;
                --budget;
                ++received;
                continue;
              sg:
                /* Scatter/gather: FIRST starts a new partial packet,
                 * subsequent BDs are appended until LAST completes it */
                if (ctrl & MAL_RX_CTRL_FIRST) {
                        BUG_ON(dev->rx_sg_skb);
                        if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
                                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                                ++dev->estats.rx_dropped_oom;
                                emac_recycle_rx_skb(dev, slot, 0);
                        } else {
                                dev->rx_sg_skb = skb;
                                skb_put(skb, len);
                        }
                } else if (!emac_rx_sg_append(dev, slot) &&
                           (ctrl & MAL_RX_CTRL_LAST)) {

                        skb = dev->rx_sg_skb;
                        dev->rx_sg_skb = NULL;

                        ctrl &= EMAC_BAD_RX_MASK;
                        if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
                                emac_parse_rx_error(dev, ctrl);
                                ++dev->estats.rx_dropped_error;
                                dev_kfree_skb(skb);
                                len = 0;
                        } else
                                goto push_packet;
                }
                goto skip;
              oom:
                DBG("%d: rx OOM %d" NL, dev->def->index, slot);
                /* Drop the packet and recycle skb */
                ++dev->estats.rx_dropped_oom;
                emac_recycle_rx_skb(dev, slot, 0);
                goto next;
        }

        if (received) {
                DBG2("%d: rx %d BDs" NL, dev->def->index, received);
                dev->rx_slot = slot;
        }

        if (unlikely(budget && dev->commac.rx_stopped)) {
                /* RX channel was stopped (see emac_rxde) and we still
                 * have budget: restart it once the ring has room */
                struct ocp_func_emac_data *emacdata = dev->def->additions;

                barrier();
                if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
                        DBG2("%d: rx restart" NL, dev->def->index);
                        received = 0;
                        goto again;
                }

                if (dev->rx_sg_skb) {
                        DBG2("%d: dropping partial rx packet" NL,
                             dev->def->index);
                        ++dev->estats.rx_dropped_error;
                        dev_kfree_skb(dev->rx_sg_skb);
                        dev->rx_sg_skb = NULL;
                }

                dev->commac.rx_stopped = 0;
                mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
                emac_rx_enable(dev);
                dev->rx_slot = 0;
        }
        return received;
}
1463
1464 /* BHs disabled */
1465 static int emac_peek_rx(void *param)
1466 {
1467         struct ocp_enet_private *dev = param;
1468         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1469 }
1470
1471 /* BHs disabled */
1472 static int emac_peek_rx_sg(void *param)
1473 {
1474         struct ocp_enet_private *dev = param;
1475         int slot = dev->rx_slot;
1476         while (1) {
1477                 u16 ctrl = dev->rx_desc[slot].ctrl;
1478                 if (ctrl & MAL_RX_CTRL_EMPTY)
1479                         return 0;
1480                 else if (ctrl & MAL_RX_CTRL_LAST)
1481                         return 1;
1482
1483                 slot = (slot + 1) % NUM_RX_BUFF;
1484
1485                 /* I'm just being paranoid here :) */
1486                 if (unlikely(slot == dev->rx_slot))
1487                         return 0;
1488         }
1489 }
1490
1491 /* Hard IRQ */
1492 static void emac_rxde(void *param)
1493 {
1494         struct ocp_enet_private *dev = param;
1495         ++dev->estats.rx_stopped;
1496         emac_rx_disable_async(dev);
1497 }
1498
1499 /* Hard IRQ */
1500 static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
1501 {
1502         struct ocp_enet_private *dev = dev_instance;
1503         struct emac_regs *p = dev->emacp;
1504         struct ibm_emac_error_stats *st = &dev->estats;
1505
1506         u32 isr = in_be32(&p->isr);
1507         out_be32(&p->isr, isr);
1508
1509         DBG("%d: isr = %08x" NL, dev->def->index, isr);
1510
1511         if (isr & EMAC_ISR_TXPE)
1512                 ++st->tx_parity;
1513         if (isr & EMAC_ISR_RXPE)
1514                 ++st->rx_parity;
1515         if (isr & EMAC_ISR_TXUE)
1516                 ++st->tx_underrun;
1517         if (isr & EMAC_ISR_RXOE)
1518                 ++st->rx_fifo_overrun;
1519         if (isr & EMAC_ISR_OVR)
1520                 ++st->rx_overrun;
1521         if (isr & EMAC_ISR_BP)
1522                 ++st->rx_bad_packet;
1523         if (isr & EMAC_ISR_RP)
1524                 ++st->rx_runt_packet;
1525         if (isr & EMAC_ISR_SE)
1526                 ++st->rx_short_event;
1527         if (isr & EMAC_ISR_ALE)
1528                 ++st->rx_alignment_error;
1529         if (isr & EMAC_ISR_BFCS)
1530                 ++st->rx_bad_fcs;
1531         if (isr & EMAC_ISR_PTLE)
1532                 ++st->rx_packet_too_long;
1533         if (isr & EMAC_ISR_ORE)
1534                 ++st->rx_out_of_range;
1535         if (isr & EMAC_ISR_IRE)
1536                 ++st->rx_in_range;
1537         if (isr & EMAC_ISR_SQE)
1538                 ++st->tx_sqe;
1539         if (isr & EMAC_ISR_TE)
1540                 ++st->tx_errors;
1541
1542         return IRQ_HANDLED;
1543 }
1544
/* Build the generic net_device_stats from the driver's internal
 * stats/error counters.  IRQs are disabled to get a consistent
 * snapshot, since counters are also updated from IRQ context
 * (see emac_irq) and from the poll callbacks.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
        struct ocp_enet_private *dev = ndev->priv;
        struct ibm_emac_stats *st = &dev->stats;
        struct ibm_emac_error_stats *est = &dev->estats;
        struct net_device_stats *nst = &dev->nstats;

        DBG2("%d: stats" NL, dev->def->index);

        /* Compute "legacy" statistics */
        local_irq_disable();
        nst->rx_packets = (unsigned long)st->rx_packets;
        nst->rx_bytes = (unsigned long)st->rx_bytes;
        nst->tx_packets = (unsigned long)st->tx_packets;
        nst->tx_bytes = (unsigned long)st->tx_bytes;
        nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
                                          est->rx_dropped_error +
                                          est->rx_dropped_resize +
                                          est->rx_dropped_mtu);
        nst->tx_dropped = (unsigned long)est->tx_dropped;

        nst->rx_errors = (unsigned long)est->rx_bd_errors;
        nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
                                              est->rx_fifo_overrun +
                                              est->rx_overrun);
        nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
                                               est->rx_alignment_error);
        nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
                                             est->rx_bad_fcs);
        nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
                                                est->rx_bd_short_event +
                                                est->rx_bd_packet_too_long +
                                                est->rx_bd_out_of_range +
                                                est->rx_bd_in_range +
                                                est->rx_runt_packet +
                                                est->rx_short_event +
                                                est->rx_packet_too_long +
                                                est->rx_out_of_range +
                                                est->rx_in_range);

        nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
        nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
                                              est->tx_underrun);
        nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
        nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
                                          est->tx_bd_excessive_collisions +
                                          est->tx_bd_late_collision +
                                          est->tx_bd_multple_collisions);
        local_irq_enable();
        return nst;
}
1596
/* OCP device remove hook: unregister the netdev, detach the optional
 * TAH/RGMII/ZMII helper devices, unhook from MAL and release the
 * register mapping and netdev memory.
 */
static void emac_remove(struct ocp_device *ocpdev)
{
        struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);

        DBG("%d: remove" NL, dev->def->index);

        ocp_set_drvdata(ocpdev, 0);
        unregister_netdev(dev->ndev);

        tah_fini(dev->tah_dev);
        rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
        zmii_fini(dev->zmii_dev, dev->zmii_input);

        /* Unregister the debug hook for this index */
        emac_dbg_register(dev->def->index, 0);

        mal_unregister_commac(dev->mal, &dev->commac);
        iounmap((void *)dev->emacp);
        kfree(dev->ndev);
}
1616
/* MAL callbacks for EMACs that receive each frame in a single BD */
static struct mal_commac_ops emac_commac_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx,
        .rxde = &emac_rxde,
};
1623
/* MAL callbacks using the scatter/gather-aware RX peek */
static struct mal_commac_ops emac_commac_sg_ops = {
        .poll_tx = &emac_poll_tx,
        .poll_rx = &emac_poll_rx,
        .peek_rx = &emac_peek_rx_sg,
        .rxde = &emac_rxde,
};
1630
1631 /* Ethtool support */
/* Report current PHY/link settings to ethtool.  Always returns 0. */
static int emac_ethtool_get_settings(struct net_device *ndev,
                                     struct ethtool_cmd *cmd)
{
        struct ocp_enet_private *dev = ndev->priv;

        cmd->supported = dev->phy.features;
        cmd->port = PORT_MII;
        cmd->phy_address = dev->phy.address;
        /* Negative PHY address means no external PHY */
        cmd->transceiver =
            dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;

        /* BHs off: the phy fields are updated from BH context
         * (link timer / poll paths) */
        local_bh_disable();
        cmd->advertising = dev->phy.advertising;
        cmd->autoneg = dev->phy.autoneg;
        cmd->speed = dev->phy.speed;
        cmd->duplex = dev->phy.duplex;
        local_bh_enable();

        return 0;
}
1652
1653 static int emac_ethtool_set_settings(struct net_device *ndev,
1654                                      struct ethtool_cmd *cmd)
1655 {
1656         struct ocp_enet_private *dev = ndev->priv;
1657         u32 f = dev->phy.features;
1658
1659         DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1660             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1661
1662         /* Basic sanity checks */
1663         if (dev->phy.address < 0)
1664                 return -EOPNOTSUPP;
1665         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1666                 return -EINVAL;
1667         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1668                 return -EINVAL;
1669         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1670                 return -EINVAL;
1671
1672         if (cmd->autoneg == AUTONEG_DISABLE) {
1673                 switch (cmd->speed) {
1674                 case SPEED_10:
1675                         if (cmd->duplex == DUPLEX_HALF
1676                             && !(f & SUPPORTED_10baseT_Half))
1677                                 return -EINVAL;
1678                         if (cmd->duplex == DUPLEX_FULL
1679                             && !(f & SUPPORTED_10baseT_Full))
1680                                 return -EINVAL;
1681                         break;
1682                 case SPEED_100:
1683                         if (cmd->duplex == DUPLEX_HALF
1684                             && !(f & SUPPORTED_100baseT_Half))
1685                                 return -EINVAL;
1686                         if (cmd->duplex == DUPLEX_FULL
1687                             && !(f & SUPPORTED_100baseT_Full))
1688                                 return -EINVAL;
1689                         break;
1690                 case SPEED_1000:
1691                         if (cmd->duplex == DUPLEX_HALF
1692                             && !(f & SUPPORTED_1000baseT_Half))
1693                                 return -EINVAL;
1694                         if (cmd->duplex == DUPLEX_FULL
1695                             && !(f & SUPPORTED_1000baseT_Full))
1696                                 return -EINVAL;
1697                         break;
1698                 default:
1699                         return -EINVAL;
1700                 }
1701
1702                 local_bh_disable();
1703                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1704                                                 cmd->duplex);
1705
1706         } else {
1707                 if (!(f & SUPPORTED_Autoneg))
1708                         return -EINVAL;
1709
1710                 local_bh_disable();
1711                 dev->phy.def->ops->setup_aneg(&dev->phy,
1712                                               (cmd->advertising & f) |
1713                                               (dev->phy.advertising &
1714                                                (ADVERTISED_Pause |
1715                                                 ADVERTISED_Asym_Pause)));
1716         }
1717         emac_force_link_update(dev);
1718         local_bh_enable();
1719
1720         return 0;
1721 }
1722
1723 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1724                                        struct ethtool_ringparam *rp)
1725 {
1726         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1727         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1728 }
1729
1730 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1731                                         struct ethtool_pauseparam *pp)
1732 {
1733         struct ocp_enet_private *dev = ndev->priv;
1734
1735         local_bh_disable();
1736         if ((dev->phy.features & SUPPORTED_Autoneg) &&
1737             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1738                 pp->autoneg = 1;
1739
1740         if (dev->phy.duplex == DUPLEX_FULL) {
1741                 if (dev->phy.pause)
1742                         pp->rx_pause = pp->tx_pause = 1;
1743                 else if (dev->phy.asym_pause)
1744                         pp->tx_pause = 1;
1745         }
1746         local_bh_enable();
1747 }
1748
1749 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1750 {
1751         struct ocp_enet_private *dev = ndev->priv;
1752         return dev->tah_dev != 0;
1753 }
1754
/* Size of this EMAC's contribution to the ethtool regs dump:
 * one sub-header plus the raw register window. */
static int emac_get_regs_len(struct ocp_enet_private *dev)
{
	return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
}
1759
1760 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1761 {
1762         struct ocp_enet_private *dev = ndev->priv;
1763         return sizeof(struct emac_ethtool_regs_hdr) +
1764             emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1765             zmii_get_regs_len(dev->zmii_dev) +
1766             rgmii_get_regs_len(dev->rgmii_dev) +
1767             tah_get_regs_len(dev->tah_dev);
1768 }
1769
/* Append a versioned sub-header plus a raw image of this EMAC's MMIO
 * register window to the ethtool regs dump at 'buf'; returns the
 * position just past what was written. */
static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
{
	struct emac_ethtool_regs_subhdr *hdr = buf;

	hdr->version = EMAC_ETHTOOL_REGS_VER;
	hdr->index = dev->def->index;
	/* MMIO-safe bulk copy of the register block */
	memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
	return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
}
1779
/* ethtool get_regs: dump MAL, EMAC and any attached ZMII/RGMII/TAH
 * register windows into 'buf'. The header's component mask records
 * which optional blocks are present so the parser can decode the dump. */
static void emac_ethtool_get_regs(struct net_device *ndev,
				  struct ethtool_regs *regs, void *buf)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_ethtool_regs_hdr *hdr = buf;

	hdr->components = 0;
	buf = hdr + 1;

	/* Take the whole snapshot with local interrupts off so the
	 * register images are mutually consistent. */
	local_irq_disable();
	buf = mal_dump_regs(dev->mal, buf);
	buf = emac_dump_regs(dev, buf);
	if (dev->zmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
		buf = zmii_dump_regs(dev->zmii_dev, buf);
	}
	if (dev->rgmii_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
		buf = rgmii_dump_regs(dev->rgmii_dev, buf);
	}
	if (dev->tah_dev) {
		hdr->components |= EMAC_ETHTOOL_REGS_TAH;
		buf = tah_dump_regs(dev->tah_dev, buf);
	}
	local_irq_enable();
}
1806
1807 static int emac_ethtool_nway_reset(struct net_device *ndev)
1808 {
1809         struct ocp_enet_private *dev = ndev->priv;
1810         int res = 0;
1811
1812         DBG("%d: nway_reset" NL, dev->def->index);
1813
1814         if (dev->phy.address < 0)
1815                 return -EOPNOTSUPP;
1816
1817         local_bh_disable();
1818         if (!dev->phy.autoneg) {
1819                 res = -EINVAL;
1820                 goto out;
1821         }
1822
1823         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1824         emac_force_link_update(dev);
1825
1826       out:
1827         local_bh_enable();
1828         return res;
1829 }
1830
/* Number of u64 entries returned by get_ethtool_stats */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
1835
1836 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1837                                      u8 * buf)
1838 {
1839         if (stringset == ETH_SS_STATS)
1840                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1841 }
1842
/* ethtool get_ethtool_stats: copy the generic counters followed by the
 * extended counters into 'tmp_stats'. The two memcpy's must produce a
 * layout matching emac_stats_keys. Interrupts are disabled during the
 * copy — presumably the counters are updated from interrupt context,
 * so this yields a consistent snapshot (NOTE: review assumption). */
static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
					   struct ethtool_stats *estats,
					   u64 * tmp_stats)
{
	struct ocp_enet_private *dev = ndev->priv;
	local_irq_disable();
	memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
	/* Advance past the first block, counted in u64 slots */
	tmp_stats += sizeof(dev->stats) / sizeof(u64);
	memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
	local_irq_enable();
}
1854
1855 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1856                                      struct ethtool_drvinfo *info)
1857 {
1858         struct ocp_enet_private *dev = ndev->priv;
1859
1860         strcpy(info->driver, "ibm_emac");
1861         strcpy(info->version, DRV_VERSION);
1862         info->fw_version[0] = '\0';
1863         sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1864         info->n_stats = emac_ethtool_get_stats_count(ndev);
1865         info->regdump_len = emac_ethtool_get_regs_len(ndev);
1866 }
1867
/* ethtool entry points; link/tx-csum/sg queries use the generic
 * ethtool_op_* helpers since those just read netdev feature flags. */
static struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
1891
/* MII ioctl handler. Supports the standard SIOCxMIIxxx ioctls plus the
 * older SIOCDEVPRIVATE-based equivalents. Returns -EOPNOTSUPP for
 * PHY-less configurations (no MDIO bus to talk to). */
static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *dev = ndev->priv;
	/* MII request words live directly in ifr_ifru:
	 * data[0]=phy id, data[1]=register, data[2]=value in, data[3]=value out */
	uint16_t *data = (uint16_t *) & rq->ifr_ifru;

	DBG("%d: ioctl %08x" NL, dev->def->index, cmd);

	if (dev->phy.address < 0)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCDEVPRIVATE:
		data[0] = dev->phy.address;
		/* Fall through */
	case SIOCGMIIREG:
	case SIOCDEVPRIVATE + 1:
		/* Note: reads always target our own PHY address, not the
		 * address the caller put in data[0]. */
		data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
		return 0;

	case SIOCSMIIREG:
	case SIOCDEVPRIVATE + 2:
		/* Register writes are privileged */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
1922
1923 static int __init emac_probe(struct ocp_device *ocpdev)
1924 {
1925         struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1926         struct net_device *ndev;
1927         struct ocp_device *maldev;
1928         struct ocp_enet_private *dev;
1929         int err, i;
1930
1931         DBG("%d: probe" NL, ocpdev->def->index);
1932
1933         if (!emacdata) {
1934                 printk(KERN_ERR "emac%d: Missing additional data!\n",
1935                        ocpdev->def->index);
1936                 return -ENODEV;
1937         }
1938
1939         /* Allocate our net_device structure */
1940         ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1941         if (!ndev) {
1942                 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1943                        ocpdev->def->index);
1944                 return -ENOMEM;
1945         }
1946         dev = ndev->priv;
1947         dev->ndev = ndev;
1948         dev->ldev = &ocpdev->dev;
1949         dev->def = ocpdev->def;
1950         SET_MODULE_OWNER(ndev);
1951
1952         /* Find MAL device we are connected to */
1953         maldev =
1954             ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1955         if (!maldev) {
1956                 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1957                        dev->def->index, emacdata->mal_idx);
1958                 err = -ENODEV;
1959                 goto out;
1960         }
1961         dev->mal = ocp_get_drvdata(maldev);
1962         if (!dev->mal) {
1963                 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1964                        dev->def->index, emacdata->mal_idx);
1965                 err = -ENODEV;
1966                 goto out;
1967         }
1968
1969         /* Register with MAL */
1970         dev->commac.ops = &emac_commac_ops;
1971         dev->commac.dev = dev;
1972         dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1973         dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1974         err = mal_register_commac(dev->mal, &dev->commac);
1975         if (err) {
1976                 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1977                        dev->def->index, emacdata->mal_idx);
1978                 goto out;
1979         }
1980         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
1981         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
1982
1983         /* Get pointers to BD rings */
1984         dev->tx_desc =
1985             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
1986                                                  emacdata->mal_tx_chan);
1987         dev->rx_desc =
1988             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
1989                                                  emacdata->mal_rx_chan);
1990
1991         DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
1992         DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
1993
1994         /* Clean rings */
1995         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
1996         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
1997
1998         /* If we depend on another EMAC for MDIO, check whether it was probed already */
1999         if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
2000                 struct ocp_device *mdiodev =
2001                     ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2002                                     emacdata->mdio_idx);
2003                 if (!mdiodev) {
2004                         printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2005                                dev->def->index, emacdata->mdio_idx);
2006                         err = -ENODEV;
2007                         goto out2;
2008                 }
2009                 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2010                 if (!dev->mdio_dev) {
2011                         printk(KERN_ERR
2012                                "emac%d: emac%d hasn't been initialized yet!\n",
2013                                dev->def->index, emacdata->mdio_idx);
2014                         err = -ENODEV;
2015                         goto out2;
2016                 }
2017         }
2018
2019         /* Attach to ZMII, if needed */
2020         if ((err = zmii_attach(dev)) != 0)
2021                 goto out2;
2022
2023         /* Attach to RGMII, if needed */
2024         if ((err = rgmii_attach(dev)) != 0)
2025                 goto out3;
2026
2027         /* Attach to TAH, if needed */
2028         if ((err = tah_attach(dev)) != 0)
2029                 goto out4;
2030
2031         /* Map EMAC regs */
2032         dev->emacp =
2033             (struct emac_regs *)ioremap(dev->def->paddr,
2034                                         sizeof(struct emac_regs));
2035         if (!dev->emacp) {
2036                 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2037                        dev->def->index);
2038                 err = -ENOMEM;
2039                 goto out5;
2040         }
2041
2042         /* Fill in MAC address */
2043         for (i = 0; i < 6; ++i)
2044                 ndev->dev_addr[i] = emacdata->mac_addr[i];
2045
2046         /* Set some link defaults before we can find out real parameters */
2047         dev->phy.speed = SPEED_100;
2048         dev->phy.duplex = DUPLEX_FULL;
2049         dev->phy.autoneg = AUTONEG_DISABLE;
2050         dev->phy.pause = dev->phy.asym_pause = 0;
2051         init_timer(&dev->link_timer);
2052         dev->link_timer.function = emac_link_timer;
2053         dev->link_timer.data = (unsigned long)dev;
2054
2055         /* Find PHY if any */
2056         dev->phy.dev = ndev;
2057         dev->phy.mode = emacdata->phy_mode;
2058         if (emacdata->phy_map != 0xffffffff) {
2059                 u32 phy_map = emacdata->phy_map | busy_phy_map;
2060                 u32 adv;
2061
2062                 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2063                     emacdata->phy_map, busy_phy_map);
2064
2065                 EMAC_RX_CLK_TX(dev->def->index);
2066
2067                 dev->phy.mdio_read = emac_mdio_read;
2068                 dev->phy.mdio_write = emac_mdio_write;
2069
2070                 /* Configure EMAC with defaults so we can at least use MDIO
2071                  * This is needed mostly for 440GX
2072                  */
2073                 if (emac_phy_gpcs(dev->phy.mode)) {
2074                         /* XXX
2075                          * Make GPCS PHY address equal to EMAC index.
2076                          * We probably should take into account busy_phy_map
2077                          * and/or phy_map here.
2078                          */
2079                         dev->phy.address = dev->def->index;
2080                 }
2081                 
2082                 emac_configure(dev);
2083
2084                 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2085                         if (!(phy_map & 1)) {
2086                                 int r;
2087                                 busy_phy_map |= 1 << i;
2088
2089                                 /* Quick check if there is a PHY at the address */
2090                                 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2091                                 if (r == 0xffff || r < 0)
2092                                         continue;
2093                                 if (!mii_phy_probe(&dev->phy, i))
2094                                         break;
2095                         }
2096                 if (i == 0x20) {
2097                         printk(KERN_WARNING "emac%d: can't find PHY!\n",
2098                                dev->def->index);
2099                         goto out6;
2100                 }
2101
2102                 /* Init PHY */
2103                 if (dev->phy.def->ops->init)
2104                         dev->phy.def->ops->init(&dev->phy);
2105                 
2106                 /* Disable any PHY features not supported by the platform */
2107                 dev->phy.def->features &= ~emacdata->phy_feat_exc;
2108
2109                 /* Setup initial link parameters */
2110                 if (dev->phy.features & SUPPORTED_Autoneg) {
2111                         adv = dev->phy.features;
2112 #if !defined(CONFIG_40x)
2113                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2114 #endif
2115                         /* Restart autonegotiation */
2116                         dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2117                 } else {
2118                         u32 f = dev->phy.def->features;
2119                         int speed = SPEED_10, fd = DUPLEX_HALF;
2120
2121                         /* Select highest supported speed/duplex */
2122                         if (f & SUPPORTED_1000baseT_Full) {
2123                                 speed = SPEED_1000;
2124                                 fd = DUPLEX_FULL;
2125                         } else if (f & SUPPORTED_1000baseT_Half)
2126                                 speed = SPEED_1000;
2127                         else if (f & SUPPORTED_100baseT_Full) {
2128                                 speed = SPEED_100;
2129                                 fd = DUPLEX_FULL;
2130                         } else if (f & SUPPORTED_100baseT_Half)
2131                                 speed = SPEED_100;
2132                         else if (f & SUPPORTED_10baseT_Full)
2133                                 fd = DUPLEX_FULL;
2134
2135                         /* Force link parameters */
2136                         dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2137                 }
2138         } else {
2139                 emac_reset(dev);
2140
2141                 /* PHY-less configuration.
2142                  * XXX I probably should move these settings to emacdata
2143                  */
2144                 dev->phy.address = -1;
2145                 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2146                 dev->phy.pause = 1;
2147         }
2148
2149         /* Fill in the driver function table */
2150         ndev->open = &emac_open;
2151         if (dev->tah_dev) {
2152                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2153                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2154         } else
2155                 ndev->hard_start_xmit = &emac_start_xmit;
2156         ndev->tx_timeout = &emac_full_tx_reset;
2157         ndev->watchdog_timeo = 5 * HZ;
2158         ndev->stop = &emac_close;
2159         ndev->get_stats = &emac_stats;
2160         ndev->set_multicast_list = &emac_set_multicast_list;
2161         ndev->do_ioctl = &emac_ioctl;
2162         if (emac_phy_supports_gige(emacdata->phy_mode)) {
2163                 ndev->change_mtu = &emac_change_mtu;
2164                 dev->commac.ops = &emac_commac_sg_ops;
2165         }
2166         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2167
2168         netif_carrier_off(ndev);
2169         netif_stop_queue(ndev);
2170
2171         err = register_netdev(ndev);
2172         if (err) {
2173                 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2174                        dev->def->index, err);
2175                 goto out6;
2176         }
2177
2178         ocp_set_drvdata(ocpdev, dev);
2179
2180         printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2181                ndev->name, dev->def->index,
2182                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2183                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2184
2185         if (dev->phy.address >= 0)
2186                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2187                        dev->phy.def->name, dev->phy.address);
2188
2189         emac_dbg_register(dev->def->index, dev);
2190
2191         return 0;
2192       out6:
2193         iounmap((void *)dev->emacp);
2194       out5:
2195         tah_fini(dev->tah_dev);
2196       out4:
2197         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2198       out3:
2199         zmii_fini(dev->zmii_dev, dev->zmii_input);
2200       out2:
2201         mal_unregister_commac(dev->mal, &dev->commac);
2202       out:
2203         kfree(ndev);
2204         return err;
2205 }
2206
/* Match any IBM OCP EMAC function; table ends with the
 * OCP_VENDOR_INVALID sentinel entry. */
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID}
};
2211
/* OCP bus glue binding emac_probe/emac_remove to matching devices */
static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};
2218
/* Module init: bring up the MAL layer, register the OCP driver (with
 * the EMAC clock switched to the board's internal source while probing
 * runs — EMAC_CLK_* are platform-provided macros; presumably this
 * keeps MDIO usable on boards without an external clock, TODO confirm),
 * then enable the debug facility. */
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		EMAC_CLK_EXTERNAL;
		/* NOTE(review): unregister after a failed register looks
		 * odd, but ocp_register_driver may have partially bound
		 * devices — verify against the OCP core before changing. */
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}
2240
/* Module exit: unbind all devices, then tear down MAL and debug state
 * in reverse of emac_init(). */
static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
	emac_fini_debug();
}
2248
/* Module entry/exit points */
module_init(emac_init);
module_exit(emac_exit);