Merge branch 'e1000' of ssh://198.78.49.142/srv/git/intel/linux-2.6
[sfrench/cifs-2.6.git] / drivers / net / ibm_emac / ibm_emac_core.c
1 /*
2  * drivers/net/ibm_emac/ibm_emac_core.c
3  *
4  * Driver for PowerPC 4xx on-chip ethernet controller.
5  *
6  * Copyright (c) 2004, 2005 Zultys Technologies.
7  * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
8  *
9  * Based on original work by
10  *      Matt Porter <mporter@kernel.crashing.org>
11  *      (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
12  *      Armin Kuster <akuster@mvista.com>
13  *      Johnnie Peters <jpeters@mvista.com>
14  *
15  * This program is free software; you can redistribute  it and/or modify it
16  * under  the terms of  the GNU General  Public License as published by the
17  * Free Software Foundation;  either version 2 of the  License, or (at your
18  * option) any later version.
19  *
20  */
21
22 #include <linux/config.h>
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/sched.h>
26 #include <linux/string.h>
27 #include <linux/errno.h>
28 #include <linux/interrupt.h>
29 #include <linux/delay.h>
30 #include <linux/init.h>
31 #include <linux/types.h>
32 #include <linux/pci.h>
33 #include <linux/netdevice.h>
34 #include <linux/etherdevice.h>
35 #include <linux/skbuff.h>
36 #include <linux/crc32.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/bitops.h>
40
41 #include <asm/processor.h>
42 #include <asm/io.h>
43 #include <asm/dma.h>
44 #include <asm/uaccess.h>
45 #include <asm/ocp.h>
46
47 #include "ibm_emac_core.h"
48 #include "ibm_emac_debug.h"
49
50 /*
51  * Lack of dma_unmap_???? calls is intentional.
52  *
53  * API-correct usage requires additional support state information to be 
54  * maintained for every RX and TX buffer descriptor (BD). Unfortunately, due to
55  * EMAC design (e.g. TX buffer passed from network stack can be split into
56  * several BDs, dma_map_single/dma_map_page can be used to map particular BD),
57  * maintaining such information will add additional overhead.
58  * Current DMA API implementation for 4xx processors only ensures cache coherency
59  * and dma_unmap_???? routines are empty and are likely to stay this way.
60  * I decided to omit dma_unmap_??? calls because I don't want to add additional
61  * complexity just for the sake of following some abstract API, when it doesn't
62  * add any real benefit to the driver. I understand that this decision maybe 
63  * controversial, but I really tried to make code API-correct and efficient 
64  * at the same time and didn't come up with code I liked :(.                --ebs
65  */
66
67 #define DRV_NAME        "emac"
68 #define DRV_VERSION     "3.54"
69 #define DRV_DESC        "PPC 4xx OCP EMAC driver"
70
71 MODULE_DESCRIPTION(DRV_DESC);
72 MODULE_AUTHOR
73     ("Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>");
74 MODULE_LICENSE("GPL");
75
76 /* minimum number of free TX descriptors required to wake up TX process */
77 #define EMAC_TX_WAKEUP_THRESH           (NUM_TX_BUFF / 4)
78
79 /* If packet size is less than this number, we allocate small skb and copy packet 
80  * contents into it instead of just sending original big skb up
81  */
82 #define EMAC_RX_COPY_THRESH             CONFIG_IBM_EMAC_RX_COPY_THRESHOLD
83
84 /* Since multiple EMACs share MDIO lines in various ways, we need
85  * to avoid re-using the same PHY ID in cases where the arch didn't
86  * setup precise phy_map entries
87  */
88 static u32 busy_phy_map;
89
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && \
    (defined(CONFIG_405EP) || defined(CONFIG_440EP) || defined(CONFIG_440GR))
/* 405EP has "EMAC to PHY Control Register" (CPC0_EPCTL) which can help us
 * with PHY RX clock problem.
 * 440EP/440GR has more sane SDR0_MFR register implementation than 440GX, which
 * also allows controlling each EMAC clock
 */
/* Switch EMAC 'idx' RX clock to the TX clock source (workaround path).
 * IRQs are disabled around the read-modify-write of the shared
 * CPC0_EPCTL / SDR0_MFR register.
 */
static inline void EMAC_RX_CLK_TX(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	/* 405EP: per-EMAC bit in CPC0_EPCTL (DCR 0xf3) */
	mtdcr(0xf3, mfdcr(0xf3) | (1 << idx));
#else /* CONFIG_440EP || CONFIG_440GR */
	/* 440EP/440GR: per-EMAC clock-select bits in SDR0_MFR */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) | (0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}

/* Restore EMAC 'idx' RX clock to its default (external) source */
static inline void EMAC_RX_CLK_DEFAULT(int idx)
{
	unsigned long flags;
	local_irq_save(flags);

#if defined(CONFIG_405EP)
	mtdcr(0xf3, mfdcr(0xf3) & ~(1 << idx));
#else /* CONFIG_440EP */
	SDR_WRITE(DCRN_SDR_MFR, SDR_READ(DCRN_SDR_MFR) & ~(0x08000000 >> idx));
#endif

	local_irq_restore(flags);
}
#else
/* No workaround needed/possible on this SoC: compile to nothing */
#define EMAC_RX_CLK_TX(idx)		((void)0)
#define EMAC_RX_CLK_DEFAULT(idx)	((void)0)
#endif
128
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX) && defined(CONFIG_440GX)
/* We can switch Ethernet clock to the internal source through SDR0_MFR[ECS],
 * unfortunately this is less flexible than 440EP case, because it's a global 
 * setting for all EMACs, therefore we do this clock trick only during probe.
 */
#define EMAC_CLK_INTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) | 0x08000000)
#define EMAC_CLK_EXTERNAL		SDR_WRITE(DCRN_SDR_MFR, \
					    SDR_READ(DCRN_SDR_MFR) & ~0x08000000)
#else
/* Other SoCs: no global clock switch available; expand to nothing */
#define EMAC_CLK_INTERNAL		((void)0)
#define EMAC_CLK_EXTERNAL		((void)0)
#endif
142
143 /* I don't want to litter system log with timeout errors 
144  * when we have brain-damaged PHY.
145  */
/* Report a TX/RX stop or reset timeout for this EMAC instance.
 * When the PHY RX clock workaround is enabled such timeouts are expected
 * while the link is down, so they are demoted to debug output; otherwise
 * they are logged as rate-limited kernel errors.
 */
static inline void emac_report_timeout_error(struct ocp_enet_private *dev,
					     const char *error)
{
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
	DBG("%d: %s" NL, dev->def->index, error);
#else
	if (net_ratelimit())
		printk(KERN_ERR "emac%d: %s\n", dev->def->index, error);
#endif
}
156
157 /* PHY polling intervals */
158 #define PHY_POLL_LINK_ON        HZ
159 #define PHY_POLL_LINK_OFF       (HZ / 5)
160
161 /* Graceful stop timeouts in us. 
162  * We should allow up to 1 frame time (full-duplex, ignoring collisions) 
163  */
164 #define STOP_TIMEOUT_10         1230    
165 #define STOP_TIMEOUT_100        124
166 #define STOP_TIMEOUT_1000       13
167 #define STOP_TIMEOUT_1000_JUMBO 73
168
/* ethtool statistics names. Order and count must stay in sync with
 * struct ibm_emac_stats / struct ibm_emac_error_stats.
 */
static const char emac_stats_keys[EMAC_ETHTOOL_STATS_COUNT][ETH_GSTRING_LEN] = {
	"rx_packets", "rx_bytes", "tx_packets", "tx_bytes", "rx_packets_csum",
	"tx_packets_csum", "tx_undo", "rx_dropped_stack", "rx_dropped_oom",
	"rx_dropped_error", "rx_dropped_resize", "rx_dropped_mtu",
	"rx_stopped", "rx_bd_errors", "rx_bd_overrun", "rx_bd_bad_packet",
	"rx_bd_runt_packet", "rx_bd_short_event", "rx_bd_alignment_error",
	"rx_bd_bad_fcs", "rx_bd_packet_too_long", "rx_bd_out_of_range",
	"rx_bd_in_range", "rx_parity", "rx_fifo_overrun", "rx_overrun",
	"rx_bad_packet", "rx_runt_packet", "rx_short_event",
	"rx_alignment_error", "rx_bad_fcs", "rx_packet_too_long",
	"rx_out_of_range", "rx_in_range", "tx_dropped", "tx_bd_errors",
	"tx_bd_bad_fcs", "tx_bd_carrier_loss", "tx_bd_excessive_deferral",
	"tx_bd_excessive_collisions", "tx_bd_late_collision",
	"tx_bd_multple_collisions", "tx_bd_single_collision",
	"tx_bd_underrun", "tx_bd_sqe", "tx_parity", "tx_underrun", "tx_sqe",
	"tx_errors"
};
187
188 static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs);
189 static void emac_clean_tx_ring(struct ocp_enet_private *dev);
190
191 static inline int emac_phy_supports_gige(int phy_mode)
192 {
193         return  phy_mode == PHY_MODE_GMII ||
194                 phy_mode == PHY_MODE_RGMII ||
195                 phy_mode == PHY_MODE_TBI ||
196                 phy_mode == PHY_MODE_RTBI;
197 }
198
199 static inline int emac_phy_gpcs(int phy_mode)
200 {
201         return  phy_mode == PHY_MODE_TBI ||
202                 phy_mode == PHY_MODE_RTBI;
203 }
204
/* Start the TX channel by setting MR0[TXE].
 * IRQs are disabled around the read-modify-write of MR0 because other
 * paths in this driver also update MR0 from interrupt context.
 */
static inline void emac_tx_enable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_enable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (!(r & EMAC_MR0_TXE))
		out_be32(&p->mr0, r | EMAC_MR0_TXE);
	local_irq_restore(flags);
}
220
/* Gracefully stop the TX channel: clear MR0[TXE], then busy-wait up to
 * dev->stop_timeout microseconds for the chip to confirm idle by raising
 * MR0[TXI].  A timeout is reported but otherwise not acted upon.
 */
static void emac_tx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: tx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_TXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_TXE);
		/* poll for the TX idle indication */
		while (!(in_be32(&p->mr0) & EMAC_MR0_TXI) && n) {
			udelay(1);
			--n;
		}	
		if (unlikely(!n))
			emac_report_timeout_error(dev, "TX disable timeout");
	}
	local_irq_restore(flags);
}
244
245 static void emac_rx_enable(struct ocp_enet_private *dev)
246 {
247         struct emac_regs *p = dev->emacp;
248         unsigned long flags;
249         u32 r;
250
251         local_irq_save(flags);
252         if (unlikely(dev->commac.rx_stopped))
253                 goto out;
254
255         DBG("%d: rx_enable" NL, dev->def->index);
256
257         r = in_be32(&p->mr0);
258         if (!(r & EMAC_MR0_RXE)) {
259                 if (unlikely(!(r & EMAC_MR0_RXI))) {
260                         /* Wait if previous async disable is still in progress */
261                         int n = dev->stop_timeout;
262                         while (!(r = in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
263                                 udelay(1);
264                                 --n;
265                         }       
266                         if (unlikely(!n))
267                                 emac_report_timeout_error(dev,
268                                                           "RX disable timeout");
269                 }
270                 out_be32(&p->mr0, r | EMAC_MR0_RXE);
271         }
272       out:
273         local_irq_restore(flags);
274 }
275
/* Gracefully stop the RX channel: clear MR0[RXE], then busy-wait up to
 * dev->stop_timeout microseconds for the chip to confirm idle by raising
 * MR0[RXI].  A timeout is reported but otherwise not acted upon.
 */
static void emac_rx_disable(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE) {
		int n = dev->stop_timeout;
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
		/* poll for the RX idle indication */
		while (!(in_be32(&p->mr0) & EMAC_MR0_RXI) && n) {
			udelay(1);
			--n;
		}	
		if (unlikely(!n))
			emac_report_timeout_error(dev, "RX disable timeout");
	}
	local_irq_restore(flags);
}
299
/* Request an RX stop by clearing MR0[RXE] without waiting for completion.
 * emac_rx_enable() waits for MR0[RXI] before re-enabling, so the stop is
 * allowed to finish asynchronously.
 */
static inline void emac_rx_disable_async(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	u32 r;

	local_irq_save(flags);

	DBG("%d: rx_disable_async" NL, dev->def->index);

	r = in_be32(&p->mr0);
	if (r & EMAC_MR0_RXE)
		out_be32(&p->mr0, r & ~EMAC_MR0_RXE);
	local_irq_restore(flags);
}
315
/* Soft-reset the EMAC core via MR0[SRST].
 * Unless a previous reset already failed, the RX (and, for good measure,
 * TX) channel is stopped first per the 40x erratum.  SRST is polled for
 * self-clear a bounded number of times.
 * Returns 0 on success, -ETIMEDOUT if SRST never clears; the outcome is
 * latched in dev->reset_failed for the next attempt.
 */
static int emac_reset(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	unsigned long flags;
	int n = 20;

	DBG("%d: reset" NL, dev->def->index);

	local_irq_save(flags);

	if (!dev->reset_failed) {
		/* 40x erratum suggests stopping RX channel before reset,
		 * we stop TX as well
		 */
		emac_rx_disable(dev);
		emac_tx_disable(dev);
	}

	out_be32(&p->mr0, EMAC_MR0_SRST);
	while ((in_be32(&p->mr0) & EMAC_MR0_SRST) && n)
		--n;
	local_irq_restore(flags);

	if (n) {
		dev->reset_failed = 0;
		return 0;
	} else {
		emac_report_timeout_error(dev, "reset timeout");
		dev->reset_failed = 1;
		return -ETIMEDOUT;
	}
}
348
/* Program the four 16-bit group address hash table registers (GAHT1-4)
 * from the device's multicast list.  The hash index for each address is
 * derived from the top 6 bits of its Ethernet CRC.
 */
static void emac_hash_mc(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	u16 gaht[4] = { 0 };
	struct dev_mc_list *dmi;

	DBG("%d: hash_mc %d" NL, dev->def->index, dev->ndev->mc_count);

	for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
		int bit;
		DBG2("%d: mc %02x:%02x:%02x:%02x:%02x:%02x" NL,
		     dev->def->index,
		     dmi->dmi_addr[0], dmi->dmi_addr[1], dmi->dmi_addr[2],
		     dmi->dmi_addr[3], dmi->dmi_addr[4], dmi->dmi_addr[5]);

		/* 6-bit hash -> register index (bit >> 4) and bit position */
		bit = 63 - (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 26);
		gaht[bit >> 4] |= 0x8000 >> (bit & 0x0f);
	}
	out_be32(&p->gaht1, gaht[0]);
	out_be32(&p->gaht2, gaht[1]);
	out_be32(&p->gaht3, gaht[2]);
	out_be32(&p->gaht4, gaht[3]);
}
372
373 static inline u32 emac_iff2rmr(struct net_device *ndev)
374 {
375         u32 r = EMAC_RMR_SP | EMAC_RMR_SFCS | EMAC_RMR_IAE | EMAC_RMR_BAE |
376             EMAC_RMR_BASE;
377
378         if (ndev->flags & IFF_PROMISC)
379                 r |= EMAC_RMR_PME;
380         else if (ndev->flags & IFF_ALLMULTI || ndev->mc_count > 32)
381                 r |= EMAC_RMR_PMME;
382         else if (ndev->mc_count > 0)
383                 r |= EMAC_RMR_MAE;
384
385         return r;
386 }
387
/* OPB bus frequency rounded to the nearest MHz (used for MR1/STACR setup) */
static inline int emac_opb_mhz(void)
{
	return (ocp_sys_info.opb_bus_freq + 500000) / 1000000;
}
392
/* Full (re)configuration of the EMAC after a soft reset: mode register
 * (speed/duplex/FIFO sizes/jumbo), MAC address, VLAN TPID, RX mode and
 * multicast hash, FIFO thresholds and PAUSE watermarks, and the IRQ mask.
 * Must be called with BHs disabled.
 * Returns 0 on success or -ETIMEDOUT if the reset fails.
 */
static int emac_configure(struct ocp_enet_private *dev)
{
	struct emac_regs *p = dev->emacp;
	struct net_device *ndev = dev->ndev;
	int gige;
	u32 r;

	DBG("%d: configure" NL, dev->def->index);

	if (emac_reset(dev) < 0)
		return -ETIMEDOUT;

	tah_reset(dev->tah_dev);

	/* Mode register */
	r = EMAC_MR1_BASE(emac_opb_mhz()) | EMAC_MR1_VLE | EMAC_MR1_IST;
	if (dev->phy.duplex == DUPLEX_FULL)
		r |= EMAC_MR1_FDE | EMAC_MR1_MWSW_001;
	/* stop_timeout also depends on speed; default to the 10Mb value */
	dev->stop_timeout = STOP_TIMEOUT_10;
	switch (dev->phy.speed) {
	case SPEED_1000:
		if (emac_phy_gpcs(dev->phy.mode)) {
			r |= EMAC_MR1_MF_1000GPCS |
			    EMAC_MR1_MF_IPPA(dev->phy.address);

			/* Put some arbitrary OUI, Manuf & Rev IDs so we can
			 * identify this GPCS PHY later.
			 */
			out_be32(&p->ipcr, 0xdeadbeef);
		} else
			r |= EMAC_MR1_MF_1000;
		r |= EMAC_MR1_RFS_16K;
		gige = 1;

		if (dev->ndev->mtu > ETH_DATA_LEN) {
			r |= EMAC_MR1_JPSM;
			dev->stop_timeout = STOP_TIMEOUT_1000_JUMBO;
		} else
			dev->stop_timeout = STOP_TIMEOUT_1000;
		break;
	case SPEED_100:
		r |= EMAC_MR1_MF_100;
		dev->stop_timeout = STOP_TIMEOUT_100;
		/* Fall through */
	default:
		r |= EMAC_MR1_RFS_4K;
		gige = 0;
		break;
	}

	/* Propagate the negotiated speed to the attached RGMII/ZMII bridge */
	if (dev->rgmii_dev)
		rgmii_set_speed(dev->rgmii_dev, dev->rgmii_input,
				dev->phy.speed);
	else
		zmii_set_speed(dev->zmii_dev, dev->zmii_input, dev->phy.speed);

#if !defined(CONFIG_40x)
	/* on 40x erratum forces us to NOT use integrated flow control, 
	 * let's hope it works on 44x ;)
	 */
	if (dev->phy.duplex == DUPLEX_FULL) {
		if (dev->phy.pause)
			r |= EMAC_MR1_EIFC | EMAC_MR1_APP;
		else if (dev->phy.asym_pause)
			r |= EMAC_MR1_APP;
	}
#endif
	out_be32(&p->mr1, r);

	/* Set individual MAC address */
	out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
	out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
		 (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
		 ndev->dev_addr[5]);

	/* VLAN Tag Protocol ID */
	out_be32(&p->vtpid, 0x8100);

	/* Receive mode register */
	r = emac_iff2rmr(ndev);
	if (r & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, r);

	/* FIFOs thresholds */
	r = EMAC_TMR1((EMAC_MAL_BURST_SIZE / EMAC_FIFO_ENTRY_SIZE) + 1,
		      EMAC_TX_FIFO_SIZE / 2 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->tmr1, r);
	out_be32(&p->trtr, EMAC_TRTR(EMAC_TX_FIFO_SIZE / 2));

	/* PAUSE frame is sent when RX FIFO reaches its high-water mark,
	   there should be still enough space in FIFO to allow the our link
	   partner time to process this frame and also time to send PAUSE 
	   frame itself.

	   Here is the worst case scenario for the RX FIFO "headroom"
	   (from "The Switch Book") (100Mbps, without preamble, inter-frame gap):

	   1) One maximum-length frame on TX                    1522 bytes
	   2) One PAUSE frame time                                64 bytes
	   3) PAUSE frame decode time allowance                   64 bytes
	   4) One maximum-length frame on RX                    1522 bytes
	   5) Round-trip propagation delay of the link (100Mb)    15 bytes
	   ----------       
	   3187 bytes

	   I chose to set high-water mark to RX_FIFO_SIZE / 4 (1024 bytes)
	   low-water mark  to RX_FIFO_SIZE / 8 (512 bytes)
	 */
	r = EMAC_RWMR(EMAC_RX_FIFO_SIZE(gige) / 8 / EMAC_FIFO_ENTRY_SIZE,
		      EMAC_RX_FIFO_SIZE(gige) / 4 / EMAC_FIFO_ENTRY_SIZE);
	out_be32(&p->rwmr, r);

	/* Set PAUSE timer to the maximum */
	out_be32(&p->ptr, 0xffff);

	/* IRQ sources */
	out_be32(&p->iser, EMAC_ISR_TXPE | EMAC_ISR_RXPE | /* EMAC_ISR_TXUE |
		 EMAC_ISR_RXOE | */ EMAC_ISR_OVR | EMAC_ISR_BP | EMAC_ISR_SE |
		 EMAC_ISR_ALE | EMAC_ISR_BFCS | EMAC_ISR_PTLE | EMAC_ISR_ORE |
		 EMAC_ISR_IRE | EMAC_ISR_TE);
		 
	/* We need to take GPCS PHY out of isolate mode after EMAC reset */
	if (emac_phy_gpcs(dev->phy.mode)) 
		mii_reset_phy(&dev->phy);
		 
	return 0;
}
522
523 /* BHs disabled */
524 static void emac_reinitialize(struct ocp_enet_private *dev)
525 {
526         DBG("%d: reinitialize" NL, dev->def->index);
527
528         if (!emac_configure(dev)) {
529                 emac_tx_enable(dev);
530                 emac_rx_enable(dev);
531         }
532 }
533
/* Full TX path reset: stop the EMAC TX channel and its MAL channel, drop
 * all queued TX skbs, reconfigure the whole EMAC, then restart both MAL
 * and EMAC channels and wake the netdev queue.
 * The strict disable -> clean -> configure -> enable order matters.
 * Caller must have BHs disabled.
 */
static void emac_full_tx_reset(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: full_tx_reset" NL, dev->def->index);

	emac_tx_disable(dev);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_clean_tx_ring(dev);
	/* ring is empty now; reset all ring indices */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = 0;

	emac_configure(dev);

	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);

	netif_wake_queue(ndev);
}
555
/* Low-level PHY register read through the EMAC STA (MDIO) interface.
 * Waits for the interface to go idle, issues the read, then waits for
 * completion.  Returns the 16-bit register value, -ETIMEDOUT if the
 * management interface stays busy, or -EREMOTEIO on a PHY error (PHYE).
 * Caller serializes access (see emac_mdio_read).
 */
static int __emac_mdio_read(struct ocp_enet_private *dev, u8 id, u8 reg)
{
	struct emac_regs *p = dev->emacp;
	u32 r;
	int n;

	DBG2("%d: mdio_read(%02x,%02x)" NL, dev->def->index, id, reg);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to become idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue read command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_READ |
		 (reg & EMAC_STACR_PRA_MASK)
		 | ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT)
		 | EMAC_STACR_START);

	/* Wait for read to complete */
	n = 100;
	while (!emac_phy_done(r = in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	if (unlikely(r & EMAC_STACR_PHYE)) {
		DBG("%d: mdio_read(%02x, %02x) failed" NL, dev->def->index,
		    id, reg);
		return -EREMOTEIO;
	}

	/* Data is returned in the PHYD field of STACR */
	r = ((r >> EMAC_STACR_PHYD_SHIFT) & EMAC_STACR_PHYD_MASK);
	DBG2("%d: mdio_read -> %04x" NL, dev->def->index, r);
	return r;
      to:
	DBG("%d: MII management interface timeout (read)" NL, dev->def->index);
	return -ETIMEDOUT;
}
603
/* Low-level PHY register write through the EMAC STA (MDIO) interface.
 * Waits for the interface to go idle, issues the write, then waits for
 * completion.  Timeouts are logged (debug) but not reported to the
 * caller.  Caller serializes access (see emac_mdio_write).
 */
static void __emac_mdio_write(struct ocp_enet_private *dev, u8 id, u8 reg,
			      u16 val)
{
	struct emac_regs *p = dev->emacp;
	int n;

	DBG2("%d: mdio_write(%02x,%02x,%04x)" NL, dev->def->index, id, reg,
	     val);

	/* Enable proper MDIO port */
	zmii_enable_mdio(dev->zmii_dev, dev->zmii_input);

	/* Wait for management interface to be idle */
	n = 10;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}

	/* Issue write command */
	out_be32(&p->stacr,
		 EMAC_STACR_BASE(emac_opb_mhz()) | EMAC_STACR_STAC_WRITE |
		 (reg & EMAC_STACR_PRA_MASK) |
		 ((id & EMAC_STACR_PCDA_MASK) << EMAC_STACR_PCDA_SHIFT) |
		 (val << EMAC_STACR_PHYD_SHIFT) | EMAC_STACR_START);

	/* Wait for write to complete */
	n = 100;
	while (!emac_phy_done(in_be32(&p->stacr))) {
		udelay(1);
		if (!--n)
			goto to;
	}
	return;
      to:
	DBG("%d: MII management interface timeout (write)" NL, dev->def->index);
}
642
643 static int emac_mdio_read(struct net_device *ndev, int id, int reg)
644 {
645         struct ocp_enet_private *dev = ndev->priv;
646         int res;
647
648         local_bh_disable();
649         res = __emac_mdio_read(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
650                                (u8) reg);
651         local_bh_enable();
652         return res;
653 }
654
655 static void emac_mdio_write(struct net_device *ndev, int id, int reg, int val)
656 {
657         struct ocp_enet_private *dev = ndev->priv;
658
659         local_bh_disable();
660         __emac_mdio_write(dev->mdio_dev ? dev->mdio_dev : dev, (u8) id,
661                           (u8) reg, (u16) val);
662         local_bh_enable();
663 }
664
/* netdev set_multicast_list hook: update the RX mode register and the
 * multicast hash without a full EMAC reset.  Caller (stack) runs this
 * with BHs disabled.
 */
static void emac_set_multicast_list(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct emac_regs *p = dev->emacp;
	u32 rmr = emac_iff2rmr(ndev);

	DBG("%d: multicast %08x" NL, dev->def->index, rmr);
	BUG_ON(!netif_running(dev->ndev));

	/* I decided to relax register access rules here to avoid
	 * full EMAC reset.
	 *
	 * There is a real problem with EMAC4 core if we use MWSW_001 bit 
	 * in MR1 register and do a full EMAC reset.
	 * One TX BD status update is delayed and, after EMAC reset, it 
	 * never happens, resulting in TX hung (it'll be recovered by TX 
	 * timeout handler eventually, but this is just gross).
	 * So we either have to do full TX reset or try to cheat here :)
	 *
	 * The only required change is to RX mode register, so I *think* all
	 * we need is just to stop RX channel. This seems to work on all
	 * tested SoCs.                                                --ebs
	 */
	emac_rx_disable(dev);
	if (rmr & EMAC_RMR_MAE)
		emac_hash_mc(dev);
	out_be32(&p->rmr, rmr);
	emac_rx_enable(dev);
}
695
/* Resize the RX ring for a new MTU.  Two-pass scheme: first mark every BD
 * empty (dropping unprocessed packets), then, only if bigger buffers are
 * needed, replace each skb with a larger one.  On allocation failure the
 * ring is still consistent thanks to the first pass; RX simply restarts
 * with the old buffers and -ENOMEM is returned.
 * Caller must have BHs disabled.  Returns 0 on success.
 */
static int emac_resize_rx_ring(struct ocp_enet_private *dev, int new_mtu)
{
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int rx_sync_size = emac_rx_sync_size(new_mtu);
	int rx_skb_size = emac_rx_skb_size(new_mtu);
	int i, ret = 0;

	emac_rx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);

	/* Drop any partially-assembled scatter/gather packet */
	if (dev->rx_sg_skb) {
		++dev->estats.rx_dropped_resize;
		dev_kfree_skb(dev->rx_sg_skb);
		dev->rx_sg_skb = NULL;
	}

	/* Make a first pass over RX ring and mark BDs ready, dropping 
	 * non-processed packets on the way. We need this as a separate pass
	 * to simplify error recovery in the case of allocation failure later.
	 */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		if (dev->rx_desc[i].ctrl & MAL_RX_CTRL_FIRST)
			++dev->estats.rx_dropped_resize;

		dev->rx_desc[i].data_len = 0;
		dev->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY |
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
	}

	/* Reallocate RX ring only if bigger skb buffers are required */
	if (rx_skb_size <= dev->rx_skb_size)
		goto skip;

	/* Second pass, allocate new skbs */
	for (i = 0; i < NUM_RX_BUFF; ++i) {
		struct sk_buff *skb = alloc_skb(rx_skb_size, GFP_ATOMIC);
		if (!skb) {
			ret = -ENOMEM;
			goto oom;
		}

		BUG_ON(!dev->rx_skb[i]);
		dev_kfree_skb(dev->rx_skb[i]);

		/* +2 keeps the IP header word-aligned after the 14-byte
		 * Ethernet header; the DMA address is adjusted to match
		 */
		skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
		dev->rx_desc[i].data_ptr =
		    dma_map_single(dev->ldev, skb->data - 2, rx_sync_size,
				   DMA_FROM_DEVICE) + 2;
		dev->rx_skb[i] = skb;
	}
      skip:
	/* Check if we need to change "Jumbo" bit in MR1 */
	if ((new_mtu > ETH_DATA_LEN) ^ (dev->ndev->mtu > ETH_DATA_LEN)) {
		/* This is to prevent starting RX channel in emac_rx_enable() */
		dev->commac.rx_stopped = 1;

		dev->ndev->mtu = new_mtu;
		emac_full_tx_reset(dev->ndev);
	}

	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(new_mtu));
      oom:
	/* Restart RX */
	dev->commac.rx_stopped = dev->rx_slot = 0;
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_rx_enable(dev);

	return ret;
}
766
767 /* Process ctx, rtnl_lock semaphore */
768 static int emac_change_mtu(struct net_device *ndev, int new_mtu)
769 {
770         struct ocp_enet_private *dev = ndev->priv;
771         int ret = 0;
772
773         if (new_mtu < EMAC_MIN_MTU || new_mtu > EMAC_MAX_MTU)
774                 return -EINVAL;
775
776         DBG("%d: change_mtu(%d)" NL, dev->def->index, new_mtu);
777
778         local_bh_disable();
779         if (netif_running(ndev)) {
780                 /* Check if we really need to reinitalize RX ring */
781                 if (emac_rx_skb_size(ndev->mtu) != emac_rx_skb_size(new_mtu))
782                         ret = emac_resize_rx_ring(dev, new_mtu);
783         }
784
785         if (!ret) {
786                 ndev->mtu = new_mtu;
787                 dev->rx_skb_size = emac_rx_skb_size(new_mtu);
788                 dev->rx_sync_size = emac_rx_sync_size(new_mtu);
789         }       
790         local_bh_enable();
791
792         return ret;
793 }
794
795 static void emac_clean_tx_ring(struct ocp_enet_private *dev)
796 {
797         int i;
798         for (i = 0; i < NUM_TX_BUFF; ++i) {
799                 if (dev->tx_skb[i]) {
800                         dev_kfree_skb(dev->tx_skb[i]);
801                         dev->tx_skb[i] = NULL;
802                         if (dev->tx_desc[i].ctrl & MAL_TX_CTRL_READY)
803                                 ++dev->estats.tx_dropped;
804                 }
805                 dev->tx_desc[i].ctrl = 0;
806                 dev->tx_desc[i].data_ptr = 0;
807         }
808 }
809
810 static void emac_clean_rx_ring(struct ocp_enet_private *dev)
811 {
812         int i;
813         for (i = 0; i < NUM_RX_BUFF; ++i)
814                 if (dev->rx_skb[i]) {
815                         dev->rx_desc[i].ctrl = 0;
816                         dev_kfree_skb(dev->rx_skb[i]);
817                         dev->rx_skb[i] = NULL;
818                         dev->rx_desc[i].data_ptr = 0;
819                 }
820
821         if (dev->rx_sg_skb) {
822                 dev_kfree_skb(dev->rx_sg_skb);
823                 dev->rx_sg_skb = NULL;
824         }
825 }
826
/* Allocate and DMA-map a fresh RX skb for ring position 'slot'.
 * The 2-byte fudge keeps the IP header word-aligned after the 14-byte
 * Ethernet header; the mapped DMA address is adjusted to match.
 * 'flags' is the allocation context (GFP_KERNEL/GFP_ATOMIC).
 * Returns 0 on success, -ENOMEM if the skb allocation fails.
 */
static inline int emac_alloc_rx_skb(struct ocp_enet_private *dev, int slot,
				    int flags)
{
	struct sk_buff *skb = alloc_skb(dev->rx_skb_size, flags);
	if (unlikely(!skb))
		return -ENOMEM;

	dev->rx_skb[slot] = skb;
	dev->rx_desc[slot].data_len = 0;

	skb_reserve(skb, EMAC_RX_SKB_HEADROOM + 2);
	dev->rx_desc[slot].data_ptr = 
	    dma_map_single(dev->ldev, skb->data - 2, dev->rx_sync_size, 
			   DMA_FROM_DEVICE) + 2;
	/* descriptor fields must be visible before EMPTY is set */
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

	return 0;
}
847
848 static void emac_print_link_status(struct ocp_enet_private *dev)
849 {
850         if (netif_carrier_ok(dev->ndev))
851                 printk(KERN_INFO "%s: link is up, %d %s%s\n",
852                        dev->ndev->name, dev->phy.speed,
853                        dev->phy.duplex == DUPLEX_FULL ? "FDX" : "HDX",
854                        dev->phy.pause ? ", pause enabled" :
855                        dev->phy.asym_pause ? ", assymetric pause enabled" : "");
856         else
857                 printk(KERN_INFO "%s: link is down\n", dev->ndev->name);
858 }
859
/* Bring the interface up: request the error IRQ, populate the RX ring,
 * take an initial link snapshot, then configure the MAC and enable the
 * MAL channels.  Process context, called under the rtnl_lock semaphore.
 */
static int emac_open(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;
	int err, i;

	DBG("%d: open" NL, dev->def->index);

	/* Setup error IRQ handler */
	err = request_irq(dev->def->irq, emac_irq, 0, "EMAC", dev);
	if (err) {
		printk(KERN_ERR "%s: failed to request IRQ %d\n",
		       ndev->name, dev->def->irq);
		return err;
	}

	/* Allocate RX ring */
	for (i = 0; i < NUM_RX_BUFF; ++i)
		if (emac_alloc_rx_skb(dev, i, GFP_KERNEL)) {
			printk(KERN_ERR "%s: failed to allocate RX ring\n",
			       ndev->name);
			goto oom;
		}

	local_bh_disable();
	/* Reset all ring bookkeeping before enabling anything */
	dev->tx_cnt = dev->tx_slot = dev->ack_slot = dev->rx_slot =
	    dev->commac.rx_stopped = 0;
	dev->rx_sg_skb = NULL;

	if (dev->phy.address >= 0) {
		/* Real PHY attached: read the current link state and arm
		 * the link poll timer with the matching interval */
		int link_poll_interval;
		if (dev->phy.def->ops->poll_link(&dev->phy)) {
			dev->phy.def->ops->read_link(&dev->phy);
			EMAC_RX_CLK_DEFAULT(dev->def->index);
			netif_carrier_on(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_ON;
		} else {
			EMAC_RX_CLK_TX(dev->def->index);
			netif_carrier_off(dev->ndev);
			link_poll_interval = PHY_POLL_LINK_OFF;
		}
		mod_timer(&dev->link_timer, jiffies + link_poll_interval);
		emac_print_link_status(dev);
	} else
		/* No PHY (phy.address < 0): carrier is considered always up */
		netif_carrier_on(dev->ndev);

	emac_configure(dev);
	mal_poll_add(dev->mal, &dev->commac);
	mal_enable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_set_rcbs(dev->mal, emacdata->mal_rx_chan, emac_rx_size(ndev->mtu));
	mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	emac_tx_enable(dev);
	emac_rx_enable(dev);
	netif_start_queue(ndev);
	local_bh_enable();

	return 0;
      oom:
	/* emac_clean_rx_ring() only frees the slots we managed to fill */
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);
	return -ENOMEM;
}
923
924 /* BHs disabled */
925 static int emac_link_differs(struct ocp_enet_private *dev)
926 {
927         u32 r = in_be32(&dev->emacp->mr1);
928
929         int duplex = r & EMAC_MR1_FDE ? DUPLEX_FULL : DUPLEX_HALF;
930         int speed, pause, asym_pause;
931
932         if (r & (EMAC_MR1_MF_1000 | EMAC_MR1_MF_1000GPCS))
933                 speed = SPEED_1000;
934         else if (r & EMAC_MR1_MF_100)
935                 speed = SPEED_100;
936         else
937                 speed = SPEED_10;
938
939         switch (r & (EMAC_MR1_EIFC | EMAC_MR1_APP)) {
940         case (EMAC_MR1_EIFC | EMAC_MR1_APP):
941                 pause = 1;
942                 asym_pause = 0;
943                 break;
944         case EMAC_MR1_APP:
945                 pause = 0;
946                 asym_pause = 1;
947                 break;
948         default:
949                 pause = asym_pause = 0;
950         }
951         return speed != dev->phy.speed || duplex != dev->phy.duplex ||
952             pause != dev->phy.pause || asym_pause != dev->phy.asym_pause;
953 }
954
/* Periodic link state poll.  On a down->up transition the PHY
 * parameters are re-read and a full TX reset is done if the MAC config
 * no longer matches (or a TAH is present).  On an up->down transition
 * the RX clock is switched and, with CONFIG_IBM_EMAC_PHY_RX_CLK_FIX,
 * the chip is reinitialized.  Timer context, BHs disabled.
 */
static void emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *dev = (struct ocp_enet_private *)data;
	int link_poll_interval;

	DBG2("%d: link timer" NL, dev->def->index);

	if (dev->phy.def->ops->poll_link(&dev->phy)) {
		if (!netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_DEFAULT(dev->def->index);

			/* Get new link parameters */
			dev->phy.def->ops->read_link(&dev->phy);

			if (dev->tah_dev || emac_link_differs(dev))
				emac_full_tx_reset(dev->ndev);

			netif_carrier_on(dev->ndev);
			emac_print_link_status(dev);
		}
		link_poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(dev->ndev)) {
			EMAC_RX_CLK_TX(dev->def->index);
#if defined(CONFIG_IBM_EMAC_PHY_RX_CLK_FIX)
			emac_reinitialize(dev);
#endif
			netif_carrier_off(dev->ndev);
			emac_print_link_status(dev);
		}

		/* Retry reset if the previous attempt failed.
		 * This is needed mostly for CONFIG_IBM_EMAC_PHY_RX_CLK_FIX
		 * case, but I left it here because it shouldn't trigger for
		 * sane PHYs anyway.
		 */
		if (unlikely(dev->reset_failed))
			emac_reinitialize(dev);

		link_poll_interval = PHY_POLL_LINK_OFF;
	}
	mod_timer(&dev->link_timer, jiffies + link_poll_interval);
}
999
1000 /* BHs disabled */
1001 static void emac_force_link_update(struct ocp_enet_private *dev)
1002 {
1003         netif_carrier_off(dev->ndev);
1004         if (timer_pending(&dev->link_timer))
1005                 mod_timer(&dev->link_timer, jiffies + PHY_POLL_LINK_OFF);
1006 }
1007
/* Shut the interface down: stop link polling, quiesce the MAC and MAL
 * channels, then release the ring buffers and the IRQ.
 * Process context, called under the rtnl_lock semaphore.
 */
static int emac_close(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ocp_func_emac_data *emacdata = dev->def->additions;

	DBG("%d: close" NL, dev->def->index);

	local_bh_disable();

	/* Link timer only runs when a real PHY is attached */
	if (dev->phy.address >= 0)
		del_timer_sync(&dev->link_timer);

	netif_stop_queue(ndev);
	emac_rx_disable(dev);
	emac_tx_disable(dev);
	mal_disable_rx_channel(dev->mal, emacdata->mal_rx_chan);
	mal_disable_tx_channel(dev->mal, emacdata->mal_tx_chan);
	mal_poll_del(dev->mal, &dev->commac);
	local_bh_enable();

	/* Both channels disabled above, so the rings can be freed safely */
	emac_clean_tx_ring(dev);
	emac_clean_rx_ring(dev);
	free_irq(dev->def->irq, dev);

	return 0;
}
1035
1036 static inline u16 emac_tx_csum(struct ocp_enet_private *dev,
1037                                struct sk_buff *skb)
1038 {
1039 #if defined(CONFIG_IBM_EMAC_TAH)
1040         if (skb->ip_summed == CHECKSUM_HW) {
1041                 ++dev->stats.tx_packets_csum;
1042                 return EMAC_TX_CTRL_TAH_CSUM;
1043         }
1044 #endif
1045         return 0;
1046 }
1047
/* Common transmit tail: kick the EMAC TX state machine, account the
 * packet and stop the queue when the ring has become full.
 * Always returns 0 (packet accepted).
 */
static inline int emac_xmit_finish(struct ocp_enet_private *dev, int len)
{
	struct emac_regs *p = dev->emacp;
	struct net_device *ndev = dev->ndev;

	/* Send the packet out */
	out_be32(&p->tmr0, EMAC_TMR0_XMIT);

	if (unlikely(++dev->tx_cnt == NUM_TX_BUFF)) {
		/* Ring is now full: stop the stack until emac_poll_tx()
		 * reclaims descriptors */
		netif_stop_queue(ndev);
		DBG2("%d: stopped TX queue" NL, dev->def->index);
	}

	ndev->trans_start = jiffies;
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += len;

	return 0;
}
1067
/* Hard transmit start (non-SG version): the whole skb occupies a single
 * TX buffer descriptor.  BHs disabled.
 */
static int emac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	unsigned int len = skb->len;
	int slot;

	u16 ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    MAL_TX_CTRL_LAST | emac_tx_csum(dev, skb);

	slot = dev->tx_slot++;
	if (dev->tx_slot == NUM_TX_BUFF) {
		dev->tx_slot = 0;
		ctrl |= MAL_TX_CTRL_WRAP;
	}

	DBG2("%d: xmit(%u) %d" NL, dev->def->index, len, slot);

	dev->tx_skb[slot] = skb;
	dev->tx_desc[slot].data_ptr = dma_map_single(dev->ldev, skb->data, len,
						     DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) len;
	/* data_ptr/data_len must be visible before READY hands the
	 * descriptor to the MAL */
	barrier();
	dev->tx_desc[slot].ctrl = ctrl;

	return emac_xmit_finish(dev, len);
}
1095
1096 #if defined(CONFIG_IBM_EMAC_TAH)
/* Fill consecutive TX BDs (starting at the slot after 'slot') with
 * MAL_MAX_TX_SIZE-sized chunks of the DMA region 'pd'/'len'.
 * 'base_ctrl' is applied to every chunk; MAL_TX_CTRL_LAST is added to
 * the final chunk when 'last' is set.  Returns the last slot used.
 * The caller is responsible for ring-space accounting.
 */
static inline int emac_xmit_split(struct ocp_enet_private *dev, int slot,
				  u32 pd, int len, int last, u16 base_ctrl)
{
	while (1) {
		u16 ctrl = base_ctrl;
		int chunk = min(len, MAL_MAX_TX_SIZE);
		len -= chunk;

		slot = (slot + 1) % NUM_TX_BUFF;

		if (last && !len)
			ctrl |= MAL_TX_CTRL_LAST;
		if (slot == NUM_TX_BUFF - 1)
			ctrl |= MAL_TX_CTRL_WRAP;

		/* Intermediate chunks don't own the skb; it is attached
		 * to the frame's last slot by the caller */
		dev->tx_skb[slot] = NULL;
		dev->tx_desc[slot].data_ptr = pd;
		dev->tx_desc[slot].data_len = (u16) chunk;
		dev->tx_desc[slot].ctrl = ctrl;
		++dev->tx_cnt;

		if (!len)
			break;

		pd += chunk;
	}
	return slot;
}
1125
/* Hard transmit start, SG version for TAH-equipped EMACs: a frame may
 * span several TX BDs (linear part plus page fragments, each further
 * split into MAL_MAX_TX_SIZE chunks).  BHs disabled.
 */
static int emac_start_xmit_sg(struct sk_buff *skb, struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int len = skb->len, chunk;
	int slot, i;
	u16 ctrl;
	u32 pd;

	/* This is common "fast" path */
	if (likely(!nr_frags && len <= MAL_MAX_TX_SIZE))
		return emac_start_xmit(skb, ndev);

	len -= skb->data_len;

	/* Note, this is only an *estimation*, we can still run out of empty
	 * slots because of the additional fragmentation into
	 * MAL_MAX_TX_SIZE-sized chunks
	 */
	if (unlikely(dev->tx_cnt + nr_frags + mal_tx_chunks(len) > NUM_TX_BUFF))
		goto stop_queue;

	ctrl = EMAC_TX_CTRL_GFCS | EMAC_TX_CTRL_GP | MAL_TX_CTRL_READY |
	    emac_tx_csum(dev, skb);
	slot = dev->tx_slot;

	/* skb data */
	dev->tx_skb[slot] = NULL;
	chunk = min(len, MAL_MAX_TX_SIZE);
	dev->tx_desc[slot].data_ptr = pd =
	    dma_map_single(dev->ldev, skb->data, len, DMA_TO_DEVICE);
	dev->tx_desc[slot].data_len = (u16) chunk;
	len -= chunk;
	if (unlikely(len))
		slot = emac_xmit_split(dev, slot, pd + chunk, len, !nr_frags,
				       ctrl);
	/* skb fragments */
	for (i = 0; i < nr_frags; ++i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		len = frag->size;

		/* Re-check ring space before each fragment's chunks */
		if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF))
			goto undo_frame;

		pd = dma_map_page(dev->ldev, frag->page, frag->page_offset, len,
				  DMA_TO_DEVICE);

		slot = emac_xmit_split(dev, slot, pd, len, i == nr_frags - 1,
				       ctrl);
	}

	DBG2("%d: xmit_sg(%u) %d - %d" NL, dev->def->index, skb->len,
	     dev->tx_slot, slot);

	/* Attach skb to the last slot so we don't release it too early */
	dev->tx_skb[slot] = skb;

	/* Send the packet out */
	if (dev->tx_slot == NUM_TX_BUFF - 1)
		ctrl |= MAL_TX_CTRL_WRAP;
	/* Hand the first BD to the MAL only after all later BDs are set up */
	barrier();
	dev->tx_desc[dev->tx_slot].ctrl = ctrl;
	dev->tx_slot = (slot + 1) % NUM_TX_BUFF;

	return emac_xmit_finish(dev, skb->len);

      undo_frame:
	/* Well, too bad. Our previous estimation was overly optimistic. 
	 * Undo everything.
	 */
	while (slot != dev->tx_slot) {
		dev->tx_desc[slot].ctrl = 0;
		--dev->tx_cnt;
		if (--slot < 0)
			slot = NUM_TX_BUFF - 1;
	}
	++dev->estats.tx_undo;

      stop_queue:
	netif_stop_queue(ndev);
	DBG2("%d: stopped TX queue" NL, dev->def->index);
	return 1;
}
1210 #else
1211 # define emac_start_xmit_sg     emac_start_xmit
1212 #endif  /* !defined(CONFIG_IBM_EMAC_TAH) */
1213
1214 /* BHs disabled */
1215 static void emac_parse_tx_error(struct ocp_enet_private *dev, u16 ctrl)
1216 {
1217         struct ibm_emac_error_stats *st = &dev->estats;
1218         DBG("%d: BD TX error %04x" NL, dev->def->index, ctrl);
1219
1220         ++st->tx_bd_errors;
1221         if (ctrl & EMAC_TX_ST_BFCS)
1222                 ++st->tx_bd_bad_fcs;
1223         if (ctrl & EMAC_TX_ST_LCS)
1224                 ++st->tx_bd_carrier_loss;
1225         if (ctrl & EMAC_TX_ST_ED)
1226                 ++st->tx_bd_excessive_deferral;
1227         if (ctrl & EMAC_TX_ST_EC)
1228                 ++st->tx_bd_excessive_collisions;
1229         if (ctrl & EMAC_TX_ST_LC)
1230                 ++st->tx_bd_late_collision;
1231         if (ctrl & EMAC_TX_ST_MC)
1232                 ++st->tx_bd_multple_collisions;
1233         if (ctrl & EMAC_TX_ST_SC)
1234                 ++st->tx_bd_single_collision;
1235         if (ctrl & EMAC_TX_ST_UR)
1236                 ++st->tx_bd_underrun;
1237         if (ctrl & EMAC_TX_ST_SQE)
1238                 ++st->tx_bd_sqe;
1239 }
1240
/* Reclaim completed TX buffer descriptors: free transmitted skbs,
 * account descriptor errors and wake the queue once enough slots are
 * free again.  Called from the MAL poll path.
 */
static void emac_poll_tx(void *param)
{
	struct ocp_enet_private *dev = param;
	DBG2("%d: poll_tx, %d %d" NL, dev->def->index, dev->tx_cnt,
	     dev->ack_slot);

	if (dev->tx_cnt) {
		u16 ctrl;
		int slot = dev->ack_slot, n = 0;
	      again:
		ctrl = dev->tx_desc[slot].ctrl;
		/* READY still set means the MAL hasn't finished this BD */
		if (!(ctrl & MAL_TX_CTRL_READY)) {
			struct sk_buff *skb = dev->tx_skb[slot];
			++n;

			/* Intermediate SG chunks have no skb attached */
			if (skb) {
				dev_kfree_skb(skb);
				dev->tx_skb[slot] = NULL;
			}
			slot = (slot + 1) % NUM_TX_BUFF;

			if (unlikely(EMAC_IS_BAD_TX(ctrl)))
				emac_parse_tx_error(dev, ctrl);

			if (--dev->tx_cnt)
				goto again;
		}
		if (n) {
			dev->ack_slot = slot;
			if (netif_queue_stopped(dev->ndev) &&
			    dev->tx_cnt < EMAC_TX_WAKEUP_THRESH)
				netif_wake_queue(dev->ndev);

			DBG2("%d: tx %d pkts" NL, dev->def->index, n);
		}
	}
}
1278
/* Re-arm RX descriptor 'slot' with its existing skb.  A non-zero 'len'
 * means the CPU touched the buffer, so it is re-synced for device DMA
 * before the descriptor is handed back to the MAL.
 */
static inline void emac_recycle_rx_skb(struct ocp_enet_private *dev, int slot,
				       int len)
{
	struct sk_buff *skb = dev->rx_skb[slot];
	DBG2("%d: recycle %d %d" NL, dev->def->index, slot, len);

	if (len) 
		dma_map_single(dev->ldev, skb->data - 2, 
			       EMAC_DMA_ALIGN(len + 2), DMA_FROM_DEVICE);

	dev->rx_desc[slot].data_len = 0;
	/* Descriptor must be fully set up before EMPTY transfers ownership */
	barrier();
	dev->rx_desc[slot].ctrl = MAL_RX_CTRL_EMPTY |
	    (slot == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);
}
1294
1295 static void emac_parse_rx_error(struct ocp_enet_private *dev, u16 ctrl)
1296 {
1297         struct ibm_emac_error_stats *st = &dev->estats;
1298         DBG("%d: BD RX error %04x" NL, dev->def->index, ctrl);
1299
1300         ++st->rx_bd_errors;
1301         if (ctrl & EMAC_RX_ST_OE)
1302                 ++st->rx_bd_overrun;
1303         if (ctrl & EMAC_RX_ST_BP)
1304                 ++st->rx_bd_bad_packet;
1305         if (ctrl & EMAC_RX_ST_RP)
1306                 ++st->rx_bd_runt_packet;
1307         if (ctrl & EMAC_RX_ST_SE)
1308                 ++st->rx_bd_short_event;
1309         if (ctrl & EMAC_RX_ST_AE)
1310                 ++st->rx_bd_alignment_error;
1311         if (ctrl & EMAC_RX_ST_BFCS)
1312                 ++st->rx_bd_bad_fcs;
1313         if (ctrl & EMAC_RX_ST_PTL)
1314                 ++st->rx_bd_packet_too_long;
1315         if (ctrl & EMAC_RX_ST_ORE)
1316                 ++st->rx_bd_out_of_range;
1317         if (ctrl & EMAC_RX_ST_IRE)
1318                 ++st->rx_bd_in_range;
1319 }
1320
1321 static inline void emac_rx_csum(struct ocp_enet_private *dev,
1322                                 struct sk_buff *skb, u16 ctrl)
1323 {
1324 #if defined(CONFIG_IBM_EMAC_TAH)
1325         if (!ctrl && dev->tah_dev) {
1326                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1327                 ++dev->stats.rx_packets_csum;
1328         }
1329 #endif
1330 }
1331
1332 static inline int emac_rx_sg_append(struct ocp_enet_private *dev, int slot)
1333 {
1334         if (likely(dev->rx_sg_skb != NULL)) {
1335                 int len = dev->rx_desc[slot].data_len;
1336                 int tot_len = dev->rx_sg_skb->len + len;
1337
1338                 if (unlikely(tot_len + 2 > dev->rx_skb_size)) {
1339                         ++dev->estats.rx_dropped_mtu;
1340                         dev_kfree_skb(dev->rx_sg_skb);
1341                         dev->rx_sg_skb = NULL;
1342                 } else {
1343                         cacheable_memcpy(dev->rx_sg_skb->tail,
1344                                          dev->rx_skb[slot]->data, len);
1345                         skb_put(dev->rx_sg_skb, len);
1346                         emac_recycle_rx_skb(dev, slot, len);
1347                         return 0;
1348                 }
1349         }
1350         emac_recycle_rx_skb(dev, slot, 0);
1351         return -1;
1352 }
1353
/* RX poll: process up to 'budget' received buffer descriptors starting
 * at dev->rx_slot and return the number processed.  Handles single-BD
 * frames and multi-BD (scatter/gather) frames, and restarts the RX
 * channel if it was stopped by a descriptor error.  BHs disabled.
 */
static int emac_poll_rx(void *param, int budget)
{
	struct ocp_enet_private *dev = param;
	int slot = dev->rx_slot, received = 0;

	DBG2("%d: poll_rx(%d)" NL, dev->def->index, budget);

      again:
	while (budget > 0) {
		int len;
		struct sk_buff *skb;
		u16 ctrl = dev->rx_desc[slot].ctrl;

		/* Descriptor still owned by the MAL - nothing more to do */
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;

		skb = dev->rx_skb[slot];
		/* Don't read data_len until after !EMPTY was observed */
		barrier();
		len = dev->rx_desc[slot].data_len;

		/* Multi-descriptor frame: take the scatter/gather path */
		if (unlikely(!MAL_IS_SINGLE_RX(ctrl)))
			goto sg;

		ctrl &= EMAC_BAD_RX_MASK;
		if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
			/* Bad frame: count it and recycle the skb in place */
			emac_parse_rx_error(dev, ctrl);
			++dev->estats.rx_dropped_error;
			emac_recycle_rx_skb(dev, slot, 0);
			len = 0;
			goto next;
		}

		if (len && len < EMAC_RX_COPY_THRESH) {
			/* Small packet: copy into a fresh skb and recycle
			 * the ring skb rather than reallocating it */
			struct sk_buff *copy_skb =
			    alloc_skb(len + EMAC_RX_SKB_HEADROOM + 2, GFP_ATOMIC);
			if (unlikely(!copy_skb))
				goto oom;

			skb_reserve(copy_skb, EMAC_RX_SKB_HEADROOM + 2);
			cacheable_memcpy(copy_skb->data - 2, skb->data - 2,
					 len + 2);
			emac_recycle_rx_skb(dev, slot, len);
			skb = copy_skb;
		} else if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC)))
			goto oom;

		skb_put(skb, len);
	      push_packet:
		skb->dev = dev->ndev;
		skb->protocol = eth_type_trans(skb, dev->ndev);
		emac_rx_csum(dev, skb, ctrl);

		if (unlikely(netif_receive_skb(skb) == NET_RX_DROP))
			++dev->estats.rx_dropped_stack;
	      next:
		++dev->stats.rx_packets;
	      skip:
		dev->stats.rx_bytes += len;
		slot = (slot + 1) % NUM_RX_BUFF;
		--budget;
		++received;
		continue;
	      sg:
		if (ctrl & MAL_RX_CTRL_FIRST) {
			/* First BD of a multi-BD frame: start collecting
			 * into rx_sg_skb (must be idle at this point) */
			BUG_ON(dev->rx_sg_skb);
			if (unlikely(emac_alloc_rx_skb(dev, slot, GFP_ATOMIC))) {
				DBG("%d: rx OOM %d" NL, dev->def->index, slot);
				++dev->estats.rx_dropped_oom;
				emac_recycle_rx_skb(dev, slot, 0);
			} else {
				dev->rx_sg_skb = skb;
				skb_put(skb, len);
			}
		} else if (!emac_rx_sg_append(dev, slot) &&
			   (ctrl & MAL_RX_CTRL_LAST)) {

			/* Frame complete: validate it and push it up */
			skb = dev->rx_sg_skb;
			dev->rx_sg_skb = NULL;

			ctrl &= EMAC_BAD_RX_MASK;
			if (unlikely(ctrl && ctrl != EMAC_RX_TAH_BAD_CSUM)) {
				emac_parse_rx_error(dev, ctrl);
				++dev->estats.rx_dropped_error;
				dev_kfree_skb(skb);
				len = 0;
			} else
				goto push_packet;
		}
		goto skip;
	      oom:
		DBG("%d: rx OOM %d" NL, dev->def->index, slot);
		/* Drop the packet and recycle skb */
		++dev->estats.rx_dropped_oom;
		emac_recycle_rx_skb(dev, slot, 0);
		goto next;
	}

	if (received) {
		DBG2("%d: rx %d BDs" NL, dev->def->index, received);
		dev->rx_slot = slot;
	}

	if (unlikely(budget && dev->commac.rx_stopped)) {
		struct ocp_func_emac_data *emacdata = dev->def->additions;

		barrier();
		if (!(dev->rx_desc[slot].ctrl & MAL_RX_CTRL_EMPTY)) {
			/* More frames arrived while we were processing */
			DBG2("%d: rx restart" NL, dev->def->index);
			received = 0;
			goto again;
		}

		if (dev->rx_sg_skb) {
			/* RX stopped mid-frame: the partial packet can
			 * never complete, so drop it */
			DBG2("%d: dropping partial rx packet" NL,
			     dev->def->index);
			++dev->estats.rx_dropped_error;
			dev_kfree_skb(dev->rx_sg_skb);
			dev->rx_sg_skb = NULL;
		}

		dev->commac.rx_stopped = 0;
		mal_enable_rx_channel(dev->mal, emacdata->mal_rx_chan);
		emac_rx_enable(dev);
		dev->rx_slot = 0;
	}
	return received;
}
1482
1483 /* BHs disabled */
1484 static int emac_peek_rx(void *param)
1485 {
1486         struct ocp_enet_private *dev = param;
1487         return !(dev->rx_desc[dev->rx_slot].ctrl & MAL_RX_CTRL_EMPTY);
1488 }
1489
1490 /* BHs disabled */
1491 static int emac_peek_rx_sg(void *param)
1492 {
1493         struct ocp_enet_private *dev = param;
1494         int slot = dev->rx_slot;
1495         while (1) {
1496                 u16 ctrl = dev->rx_desc[slot].ctrl;
1497                 if (ctrl & MAL_RX_CTRL_EMPTY)
1498                         return 0;
1499                 else if (ctrl & MAL_RX_CTRL_LAST)
1500                         return 1;
1501
1502                 slot = (slot + 1) % NUM_RX_BUFF;
1503
1504                 /* I'm just being paranoid here :) */
1505                 if (unlikely(slot == dev->rx_slot))
1506                         return 0;
1507         }
1508 }
1509
1510 /* Hard IRQ */
1511 static void emac_rxde(void *param)
1512 {
1513         struct ocp_enet_private *dev = param;
1514         ++dev->estats.rx_stopped;
1515         emac_rx_disable_async(dev);
1516 }
1517
/* EMAC error interrupt: acknowledge the interrupt status (by writing it
 * back) and translate each status bit into the matching error counter.
 * Hard IRQ context.
 */
static irqreturn_t emac_irq(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ocp_enet_private *dev = dev_instance;
	struct emac_regs *p = dev->emacp;
	struct ibm_emac_error_stats *st = &dev->estats;

	/* Read the status and write it back to ack the handled bits */
	u32 isr = in_be32(&p->isr);
	out_be32(&p->isr, isr);

	DBG("%d: isr = %08x" NL, dev->def->index, isr);

	if (isr & EMAC_ISR_TXPE)
		++st->tx_parity;
	if (isr & EMAC_ISR_RXPE)
		++st->rx_parity;
	if (isr & EMAC_ISR_TXUE)
		++st->tx_underrun;
	if (isr & EMAC_ISR_RXOE)
		++st->rx_fifo_overrun;
	if (isr & EMAC_ISR_OVR)
		++st->rx_overrun;
	if (isr & EMAC_ISR_BP)
		++st->rx_bad_packet;
	if (isr & EMAC_ISR_RP)
		++st->rx_runt_packet;
	if (isr & EMAC_ISR_SE)
		++st->rx_short_event;
	if (isr & EMAC_ISR_ALE)
		++st->rx_alignment_error;
	if (isr & EMAC_ISR_BFCS)
		++st->rx_bad_fcs;
	if (isr & EMAC_ISR_PTLE)
		++st->rx_packet_too_long;
	if (isr & EMAC_ISR_ORE)
		++st->rx_out_of_range;
	if (isr & EMAC_ISR_IRE)
		++st->rx_in_range;
	if (isr & EMAC_ISR_SQE)
		++st->tx_sqe;
	if (isr & EMAC_ISR_TE)
		++st->tx_errors;

	return IRQ_HANDLED;
}
1563
/* Fill and return the "legacy" struct net_device_stats computed from
 * the driver's internal counters.  Interrupts are disabled during the
 * snapshot so the counters aren't updated mid-copy by the IRQ/poll
 * paths.
 */
static struct net_device_stats *emac_stats(struct net_device *ndev)
{
	struct ocp_enet_private *dev = ndev->priv;
	struct ibm_emac_stats *st = &dev->stats;
	struct ibm_emac_error_stats *est = &dev->estats;
	struct net_device_stats *nst = &dev->nstats;

	DBG2("%d: stats" NL, dev->def->index);

	/* Compute "legacy" statistics */
	local_irq_disable();
	nst->rx_packets = (unsigned long)st->rx_packets;
	nst->rx_bytes = (unsigned long)st->rx_bytes;
	nst->tx_packets = (unsigned long)st->tx_packets;
	nst->tx_bytes = (unsigned long)st->tx_bytes;
	nst->rx_dropped = (unsigned long)(est->rx_dropped_oom +
					  est->rx_dropped_error +
					  est->rx_dropped_resize +
					  est->rx_dropped_mtu);
	nst->tx_dropped = (unsigned long)est->tx_dropped;

	nst->rx_errors = (unsigned long)est->rx_bd_errors;
	nst->rx_fifo_errors = (unsigned long)(est->rx_bd_overrun +
					      est->rx_fifo_overrun +
					      est->rx_overrun);
	nst->rx_frame_errors = (unsigned long)(est->rx_bd_alignment_error +
					       est->rx_alignment_error);
	nst->rx_crc_errors = (unsigned long)(est->rx_bd_bad_fcs +
					     est->rx_bad_fcs);
	nst->rx_length_errors = (unsigned long)(est->rx_bd_runt_packet +
						est->rx_bd_short_event +
						est->rx_bd_packet_too_long +
						est->rx_bd_out_of_range +
						est->rx_bd_in_range +
						est->rx_runt_packet +
						est->rx_short_event +
						est->rx_packet_too_long +
						est->rx_out_of_range +
						est->rx_in_range);

	nst->tx_errors = (unsigned long)(est->tx_bd_errors + est->tx_errors);
	nst->tx_fifo_errors = (unsigned long)(est->tx_bd_underrun +
					      est->tx_underrun);
	nst->tx_carrier_errors = (unsigned long)est->tx_bd_carrier_loss;
	nst->collisions = (unsigned long)(est->tx_bd_excessive_deferral +
					  est->tx_bd_excessive_collisions +
					  est->tx_bd_late_collision +
					  est->tx_bd_multple_collisions);
	local_irq_enable();
	return nst;
}
1615
1616 static void emac_remove(struct ocp_device *ocpdev)
1617 {
1618         struct ocp_enet_private *dev = ocp_get_drvdata(ocpdev);
1619
1620         DBG("%d: remove" NL, dev->def->index);
1621
1622         ocp_set_drvdata(ocpdev, 0);
1623         unregister_netdev(dev->ndev);
1624
1625         tah_fini(dev->tah_dev);
1626         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
1627         zmii_fini(dev->zmii_dev, dev->zmii_input);
1628
1629         emac_dbg_register(dev->def->index, 0);
1630
1631         mal_unregister_commac(dev->mal, &dev->commac);
1632         iounmap((void *)dev->emacp);
1633         kfree(dev->ndev);
1634 }
1635
/* MAL callbacks for the standard (single-BD) RX path */
static struct mal_commac_ops emac_commac_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx,
	.rxde = &emac_rxde,
};
1642
/* MAL callbacks for the scatter/gather RX path (frames spanning
 * multiple BDs); only peek_rx differs from emac_commac_ops */
static struct mal_commac_ops emac_commac_sg_ops = {
	.poll_tx = &emac_poll_tx,
	.poll_rx = &emac_poll_rx,
	.peek_rx = &emac_peek_rx_sg,
	.rxde = &emac_rxde,
};
1649
1650 /* Ethtool support */
1651 static int emac_ethtool_get_settings(struct net_device *ndev,
1652                                      struct ethtool_cmd *cmd)
1653 {
1654         struct ocp_enet_private *dev = ndev->priv;
1655
1656         cmd->supported = dev->phy.features;
1657         cmd->port = PORT_MII;
1658         cmd->phy_address = dev->phy.address;
1659         cmd->transceiver =
1660             dev->phy.address >= 0 ? XCVR_EXTERNAL : XCVR_INTERNAL;
1661
1662         local_bh_disable();
1663         cmd->advertising = dev->phy.advertising;
1664         cmd->autoneg = dev->phy.autoneg;
1665         cmd->speed = dev->phy.speed;
1666         cmd->duplex = dev->phy.duplex;
1667         local_bh_enable();
1668
1669         return 0;
1670 }
1671
1672 static int emac_ethtool_set_settings(struct net_device *ndev,
1673                                      struct ethtool_cmd *cmd)
1674 {
1675         struct ocp_enet_private *dev = ndev->priv;
1676         u32 f = dev->phy.features;
1677
1678         DBG("%d: set_settings(%d, %d, %d, 0x%08x)" NL, dev->def->index,
1679             cmd->autoneg, cmd->speed, cmd->duplex, cmd->advertising);
1680
1681         /* Basic sanity checks */
1682         if (dev->phy.address < 0)
1683                 return -EOPNOTSUPP;
1684         if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
1685                 return -EINVAL;
1686         if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
1687                 return -EINVAL;
1688         if (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL)
1689                 return -EINVAL;
1690
1691         if (cmd->autoneg == AUTONEG_DISABLE) {
1692                 switch (cmd->speed) {
1693                 case SPEED_10:
1694                         if (cmd->duplex == DUPLEX_HALF
1695                             && !(f & SUPPORTED_10baseT_Half))
1696                                 return -EINVAL;
1697                         if (cmd->duplex == DUPLEX_FULL
1698                             && !(f & SUPPORTED_10baseT_Full))
1699                                 return -EINVAL;
1700                         break;
1701                 case SPEED_100:
1702                         if (cmd->duplex == DUPLEX_HALF
1703                             && !(f & SUPPORTED_100baseT_Half))
1704                                 return -EINVAL;
1705                         if (cmd->duplex == DUPLEX_FULL
1706                             && !(f & SUPPORTED_100baseT_Full))
1707                                 return -EINVAL;
1708                         break;
1709                 case SPEED_1000:
1710                         if (cmd->duplex == DUPLEX_HALF
1711                             && !(f & SUPPORTED_1000baseT_Half))
1712                                 return -EINVAL;
1713                         if (cmd->duplex == DUPLEX_FULL
1714                             && !(f & SUPPORTED_1000baseT_Full))
1715                                 return -EINVAL;
1716                         break;
1717                 default:
1718                         return -EINVAL;
1719                 }
1720
1721                 local_bh_disable();
1722                 dev->phy.def->ops->setup_forced(&dev->phy, cmd->speed,
1723                                                 cmd->duplex);
1724
1725         } else {
1726                 if (!(f & SUPPORTED_Autoneg))
1727                         return -EINVAL;
1728
1729                 local_bh_disable();
1730                 dev->phy.def->ops->setup_aneg(&dev->phy,
1731                                               (cmd->advertising & f) |
1732                                               (dev->phy.advertising &
1733                                                (ADVERTISED_Pause |
1734                                                 ADVERTISED_Asym_Pause)));
1735         }
1736         emac_force_link_update(dev);
1737         local_bh_enable();
1738
1739         return 0;
1740 }
1741
1742 static void emac_ethtool_get_ringparam(struct net_device *ndev,
1743                                        struct ethtool_ringparam *rp)
1744 {
1745         rp->rx_max_pending = rp->rx_pending = NUM_RX_BUFF;
1746         rp->tx_max_pending = rp->tx_pending = NUM_TX_BUFF;
1747 }
1748
1749 static void emac_ethtool_get_pauseparam(struct net_device *ndev,
1750                                         struct ethtool_pauseparam *pp)
1751 {
1752         struct ocp_enet_private *dev = ndev->priv;
1753
1754         local_bh_disable();
1755         if ((dev->phy.features & SUPPORTED_Autoneg) &&
1756             (dev->phy.advertising & (ADVERTISED_Pause | ADVERTISED_Asym_Pause)))
1757                 pp->autoneg = 1;
1758
1759         if (dev->phy.duplex == DUPLEX_FULL) {
1760                 if (dev->phy.pause)
1761                         pp->rx_pause = pp->tx_pause = 1;
1762                 else if (dev->phy.asym_pause)
1763                         pp->tx_pause = 1;
1764         }
1765         local_bh_enable();
1766 }
1767
1768 static u32 emac_ethtool_get_rx_csum(struct net_device *ndev)
1769 {
1770         struct ocp_enet_private *dev = ndev->priv;
1771         return dev->tah_dev != 0;
1772 }
1773
1774 static int emac_get_regs_len(struct ocp_enet_private *dev)
1775 {
1776         return sizeof(struct emac_ethtool_regs_subhdr) + EMAC_ETHTOOL_REGS_SIZE;
1777 }
1778
1779 static int emac_ethtool_get_regs_len(struct net_device *ndev)
1780 {
1781         struct ocp_enet_private *dev = ndev->priv;
1782         return sizeof(struct emac_ethtool_regs_hdr) +
1783             emac_get_regs_len(dev) + mal_get_regs_len(dev->mal) +
1784             zmii_get_regs_len(dev->zmii_dev) +
1785             rgmii_get_regs_len(dev->rgmii_dev) +
1786             tah_get_regs_len(dev->tah_dev);
1787 }
1788
1789 static void *emac_dump_regs(struct ocp_enet_private *dev, void *buf)
1790 {
1791         struct emac_ethtool_regs_subhdr *hdr = buf;
1792
1793         hdr->version = EMAC_ETHTOOL_REGS_VER;
1794         hdr->index = dev->def->index;
1795         memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE);
1796         return ((void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE);
1797 }
1798
1799 static void emac_ethtool_get_regs(struct net_device *ndev,
1800                                   struct ethtool_regs *regs, void *buf)
1801 {
1802         struct ocp_enet_private *dev = ndev->priv;
1803         struct emac_ethtool_regs_hdr *hdr = buf;
1804
1805         hdr->components = 0;
1806         buf = hdr + 1;
1807
1808         local_irq_disable();
1809         buf = mal_dump_regs(dev->mal, buf);
1810         buf = emac_dump_regs(dev, buf);
1811         if (dev->zmii_dev) {
1812                 hdr->components |= EMAC_ETHTOOL_REGS_ZMII;
1813                 buf = zmii_dump_regs(dev->zmii_dev, buf);
1814         }
1815         if (dev->rgmii_dev) {
1816                 hdr->components |= EMAC_ETHTOOL_REGS_RGMII;
1817                 buf = rgmii_dump_regs(dev->rgmii_dev, buf);
1818         }
1819         if (dev->tah_dev) {
1820                 hdr->components |= EMAC_ETHTOOL_REGS_TAH;
1821                 buf = tah_dump_regs(dev->tah_dev, buf);
1822         }
1823         local_irq_enable();
1824 }
1825
1826 static int emac_ethtool_nway_reset(struct net_device *ndev)
1827 {
1828         struct ocp_enet_private *dev = ndev->priv;
1829         int res = 0;
1830
1831         DBG("%d: nway_reset" NL, dev->def->index);
1832
1833         if (dev->phy.address < 0)
1834                 return -EOPNOTSUPP;
1835
1836         local_bh_disable();
1837         if (!dev->phy.autoneg) {
1838                 res = -EINVAL;
1839                 goto out;
1840         }
1841
1842         dev->phy.def->ops->setup_aneg(&dev->phy, dev->phy.advertising);
1843         emac_force_link_update(dev);
1844
1845       out:
1846         local_bh_enable();
1847         return res;
1848 }
1849
/* Number of u64 counters exported via the ETH_SS_STATS string set */
static int emac_ethtool_get_stats_count(struct net_device *ndev)
{
	return EMAC_ETHTOOL_STATS_COUNT;
}
1854
1855 static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
1856                                      u8 * buf)
1857 {
1858         if (stringset == ETH_SS_STATS)
1859                 memcpy(buf, &emac_stats_keys, sizeof(emac_stats_keys));
1860 }
1861
1862 static void emac_ethtool_get_ethtool_stats(struct net_device *ndev,
1863                                            struct ethtool_stats *estats,
1864                                            u64 * tmp_stats)
1865 {
1866         struct ocp_enet_private *dev = ndev->priv;
1867         local_irq_disable();
1868         memcpy(tmp_stats, &dev->stats, sizeof(dev->stats));
1869         tmp_stats += sizeof(dev->stats) / sizeof(u64);
1870         memcpy(tmp_stats, &dev->estats, sizeof(dev->estats));
1871         local_irq_enable();
1872 }
1873
1874 static void emac_ethtool_get_drvinfo(struct net_device *ndev,
1875                                      struct ethtool_drvinfo *info)
1876 {
1877         struct ocp_enet_private *dev = ndev->priv;
1878
1879         strcpy(info->driver, "ibm_emac");
1880         strcpy(info->version, DRV_VERSION);
1881         info->fw_version[0] = '\0';
1882         sprintf(info->bus_info, "PPC 4xx EMAC %d", dev->def->index);
1883         info->n_stats = emac_ethtool_get_stats_count(ndev);
1884         info->regdump_len = emac_ethtool_get_regs_len(ndev);
1885 }
1886
/* ethtool operations table. Set-side operations with no hardware
 * support (e.g. set_pauseparam, set_ringparam) are simply left unset.
 */
static struct ethtool_ops emac_ethtool_ops = {
	.get_settings = emac_ethtool_get_settings,
	.set_settings = emac_ethtool_set_settings,
	.get_drvinfo = emac_ethtool_get_drvinfo,

	/* Register dump: EMAC + MAL + optional ZMII/RGMII/TAH */
	.get_regs_len = emac_ethtool_get_regs_len,
	.get_regs = emac_ethtool_get_regs,

	.nway_reset = emac_ethtool_nway_reset,

	.get_ringparam = emac_ethtool_get_ringparam,
	.get_pauseparam = emac_ethtool_get_pauseparam,

	/* RX csum offload is reported as "TAH present" */
	.get_rx_csum = emac_ethtool_get_rx_csum,

	.get_strings = emac_ethtool_get_strings,
	.get_stats_count = emac_ethtool_get_stats_count,
	.get_ethtool_stats = emac_ethtool_get_ethtool_stats,

	/* Generic ethtool helpers for link/TX-csum/SG flags */
	.get_link = ethtool_op_get_link,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.get_sg = ethtool_op_get_sg,
};
1910
1911 static int emac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1912 {
1913         struct ocp_enet_private *dev = ndev->priv;
1914         uint16_t *data = (uint16_t *) & rq->ifr_ifru;
1915
1916         DBG("%d: ioctl %08x" NL, dev->def->index, cmd);
1917
1918         if (dev->phy.address < 0)
1919                 return -EOPNOTSUPP;
1920
1921         switch (cmd) {
1922         case SIOCGMIIPHY:
1923         case SIOCDEVPRIVATE:
1924                 data[0] = dev->phy.address;
1925                 /* Fall through */
1926         case SIOCGMIIREG:
1927         case SIOCDEVPRIVATE + 1:
1928                 data[3] = emac_mdio_read(ndev, dev->phy.address, data[1]);
1929                 return 0;
1930
1931         case SIOCSMIIREG:
1932         case SIOCDEVPRIVATE + 2:
1933                 if (!capable(CAP_NET_ADMIN))
1934                         return -EPERM;
1935                 emac_mdio_write(ndev, dev->phy.address, data[1], data[2]);
1936                 return 0;
1937         default:
1938                 return -EOPNOTSUPP;
1939         }
1940 }
1941
1942 static int __init emac_probe(struct ocp_device *ocpdev)
1943 {
1944         struct ocp_func_emac_data *emacdata = ocpdev->def->additions;
1945         struct net_device *ndev;
1946         struct ocp_device *maldev;
1947         struct ocp_enet_private *dev;
1948         int err, i;
1949
1950         DBG("%d: probe" NL, ocpdev->def->index);
1951
1952         if (!emacdata) {
1953                 printk(KERN_ERR "emac%d: Missing additional data!\n",
1954                        ocpdev->def->index);
1955                 return -ENODEV;
1956         }
1957
1958         /* Allocate our net_device structure */
1959         ndev = alloc_etherdev(sizeof(struct ocp_enet_private));
1960         if (!ndev) {
1961                 printk(KERN_ERR "emac%d: could not allocate ethernet device!\n",
1962                        ocpdev->def->index);
1963                 return -ENOMEM;
1964         }
1965         dev = ndev->priv;
1966         dev->ndev = ndev;
1967         dev->ldev = &ocpdev->dev;
1968         dev->def = ocpdev->def;
1969         SET_MODULE_OWNER(ndev);
1970
1971         /* Find MAL device we are connected to */
1972         maldev =
1973             ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_MAL, emacdata->mal_idx);
1974         if (!maldev) {
1975                 printk(KERN_ERR "emac%d: unknown mal%d device!\n",
1976                        dev->def->index, emacdata->mal_idx);
1977                 err = -ENODEV;
1978                 goto out;
1979         }
1980         dev->mal = ocp_get_drvdata(maldev);
1981         if (!dev->mal) {
1982                 printk(KERN_ERR "emac%d: mal%d hasn't been initialized yet!\n",
1983                        dev->def->index, emacdata->mal_idx);
1984                 err = -ENODEV;
1985                 goto out;
1986         }
1987
1988         /* Register with MAL */
1989         dev->commac.ops = &emac_commac_ops;
1990         dev->commac.dev = dev;
1991         dev->commac.tx_chan_mask = MAL_CHAN_MASK(emacdata->mal_tx_chan);
1992         dev->commac.rx_chan_mask = MAL_CHAN_MASK(emacdata->mal_rx_chan);
1993         err = mal_register_commac(dev->mal, &dev->commac);
1994         if (err) {
1995                 printk(KERN_ERR "emac%d: failed to register with mal%d!\n",
1996                        dev->def->index, emacdata->mal_idx);
1997                 goto out;
1998         }
1999         dev->rx_skb_size = emac_rx_skb_size(ndev->mtu);
2000         dev->rx_sync_size = emac_rx_sync_size(ndev->mtu);
2001
2002         /* Get pointers to BD rings */
2003         dev->tx_desc =
2004             dev->mal->bd_virt + mal_tx_bd_offset(dev->mal,
2005                                                  emacdata->mal_tx_chan);
2006         dev->rx_desc =
2007             dev->mal->bd_virt + mal_rx_bd_offset(dev->mal,
2008                                                  emacdata->mal_rx_chan);
2009
2010         DBG("%d: tx_desc %p" NL, ocpdev->def->index, dev->tx_desc);
2011         DBG("%d: rx_desc %p" NL, ocpdev->def->index, dev->rx_desc);
2012
2013         /* Clean rings */
2014         memset(dev->tx_desc, 0, NUM_TX_BUFF * sizeof(struct mal_descriptor));
2015         memset(dev->rx_desc, 0, NUM_RX_BUFF * sizeof(struct mal_descriptor));
2016
2017         /* If we depend on another EMAC for MDIO, check whether it was probed already */
2018         if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
2019                 struct ocp_device *mdiodev =
2020                     ocp_find_device(OCP_VENDOR_IBM, OCP_FUNC_EMAC,
2021                                     emacdata->mdio_idx);
2022                 if (!mdiodev) {
2023                         printk(KERN_ERR "emac%d: unknown emac%d device!\n",
2024                                dev->def->index, emacdata->mdio_idx);
2025                         err = -ENODEV;
2026                         goto out2;
2027                 }
2028                 dev->mdio_dev = ocp_get_drvdata(mdiodev);
2029                 if (!dev->mdio_dev) {
2030                         printk(KERN_ERR
2031                                "emac%d: emac%d hasn't been initialized yet!\n",
2032                                dev->def->index, emacdata->mdio_idx);
2033                         err = -ENODEV;
2034                         goto out2;
2035                 }
2036         }
2037
2038         /* Attach to ZMII, if needed */
2039         if ((err = zmii_attach(dev)) != 0)
2040                 goto out2;
2041
2042         /* Attach to RGMII, if needed */
2043         if ((err = rgmii_attach(dev)) != 0)
2044                 goto out3;
2045
2046         /* Attach to TAH, if needed */
2047         if ((err = tah_attach(dev)) != 0)
2048                 goto out4;
2049
2050         /* Map EMAC regs */
2051         dev->emacp =
2052             (struct emac_regs *)ioremap(dev->def->paddr,
2053                                         sizeof(struct emac_regs));
2054         if (!dev->emacp) {
2055                 printk(KERN_ERR "emac%d: could not ioremap device registers!\n",
2056                        dev->def->index);
2057                 err = -ENOMEM;
2058                 goto out5;
2059         }
2060
2061         /* Fill in MAC address */
2062         for (i = 0; i < 6; ++i)
2063                 ndev->dev_addr[i] = emacdata->mac_addr[i];
2064
2065         /* Set some link defaults before we can find out real parameters */
2066         dev->phy.speed = SPEED_100;
2067         dev->phy.duplex = DUPLEX_FULL;
2068         dev->phy.autoneg = AUTONEG_DISABLE;
2069         dev->phy.pause = dev->phy.asym_pause = 0;
2070         dev->stop_timeout = STOP_TIMEOUT_100;
2071         init_timer(&dev->link_timer);
2072         dev->link_timer.function = emac_link_timer;
2073         dev->link_timer.data = (unsigned long)dev;
2074
2075         /* Find PHY if any */
2076         dev->phy.dev = ndev;
2077         dev->phy.mode = emacdata->phy_mode;
2078         if (emacdata->phy_map != 0xffffffff) {
2079                 u32 phy_map = emacdata->phy_map | busy_phy_map;
2080                 u32 adv;
2081
2082                 DBG("%d: PHY maps %08x %08x" NL, dev->def->index,
2083                     emacdata->phy_map, busy_phy_map);
2084
2085                 EMAC_RX_CLK_TX(dev->def->index);
2086
2087                 dev->phy.mdio_read = emac_mdio_read;
2088                 dev->phy.mdio_write = emac_mdio_write;
2089
2090                 /* Configure EMAC with defaults so we can at least use MDIO
2091                  * This is needed mostly for 440GX
2092                  */
2093                 if (emac_phy_gpcs(dev->phy.mode)) {
2094                         /* XXX
2095                          * Make GPCS PHY address equal to EMAC index.
2096                          * We probably should take into account busy_phy_map
2097                          * and/or phy_map here.
2098                          */
2099                         dev->phy.address = dev->def->index;
2100                 }
2101                 
2102                 emac_configure(dev);
2103
2104                 for (i = 0; i < 0x20; phy_map >>= 1, ++i)
2105                         if (!(phy_map & 1)) {
2106                                 int r;
2107                                 busy_phy_map |= 1 << i;
2108
2109                                 /* Quick check if there is a PHY at the address */
2110                                 r = emac_mdio_read(dev->ndev, i, MII_BMCR);
2111                                 if (r == 0xffff || r < 0)
2112                                         continue;
2113                                 if (!mii_phy_probe(&dev->phy, i))
2114                                         break;
2115                         }
2116                 if (i == 0x20) {
2117                         printk(KERN_WARNING "emac%d: can't find PHY!\n",
2118                                dev->def->index);
2119                         goto out6;
2120                 }
2121
2122                 /* Init PHY */
2123                 if (dev->phy.def->ops->init)
2124                         dev->phy.def->ops->init(&dev->phy);
2125                 
2126                 /* Disable any PHY features not supported by the platform */
2127                 dev->phy.def->features &= ~emacdata->phy_feat_exc;
2128
2129                 /* Setup initial link parameters */
2130                 if (dev->phy.features & SUPPORTED_Autoneg) {
2131                         adv = dev->phy.features;
2132 #if !defined(CONFIG_40x)
2133                         adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
2134 #endif
2135                         /* Restart autonegotiation */
2136                         dev->phy.def->ops->setup_aneg(&dev->phy, adv);
2137                 } else {
2138                         u32 f = dev->phy.def->features;
2139                         int speed = SPEED_10, fd = DUPLEX_HALF;
2140
2141                         /* Select highest supported speed/duplex */
2142                         if (f & SUPPORTED_1000baseT_Full) {
2143                                 speed = SPEED_1000;
2144                                 fd = DUPLEX_FULL;
2145                         } else if (f & SUPPORTED_1000baseT_Half)
2146                                 speed = SPEED_1000;
2147                         else if (f & SUPPORTED_100baseT_Full) {
2148                                 speed = SPEED_100;
2149                                 fd = DUPLEX_FULL;
2150                         } else if (f & SUPPORTED_100baseT_Half)
2151                                 speed = SPEED_100;
2152                         else if (f & SUPPORTED_10baseT_Full)
2153                                 fd = DUPLEX_FULL;
2154
2155                         /* Force link parameters */
2156                         dev->phy.def->ops->setup_forced(&dev->phy, speed, fd);
2157                 }
2158         } else {
2159                 emac_reset(dev);
2160
2161                 /* PHY-less configuration.
2162                  * XXX I probably should move these settings to emacdata
2163                  */
2164                 dev->phy.address = -1;
2165                 dev->phy.features = SUPPORTED_100baseT_Full | SUPPORTED_MII;
2166                 dev->phy.pause = 1;
2167         }
2168
2169         /* Fill in the driver function table */
2170         ndev->open = &emac_open;
2171         if (dev->tah_dev) {
2172                 ndev->hard_start_xmit = &emac_start_xmit_sg;
2173                 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
2174         } else
2175                 ndev->hard_start_xmit = &emac_start_xmit;
2176         ndev->tx_timeout = &emac_full_tx_reset;
2177         ndev->watchdog_timeo = 5 * HZ;
2178         ndev->stop = &emac_close;
2179         ndev->get_stats = &emac_stats;
2180         ndev->set_multicast_list = &emac_set_multicast_list;
2181         ndev->do_ioctl = &emac_ioctl;
2182         if (emac_phy_supports_gige(emacdata->phy_mode)) {
2183                 ndev->change_mtu = &emac_change_mtu;
2184                 dev->commac.ops = &emac_commac_sg_ops;
2185         }
2186         SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
2187
2188         netif_carrier_off(ndev);
2189         netif_stop_queue(ndev);
2190
2191         err = register_netdev(ndev);
2192         if (err) {
2193                 printk(KERN_ERR "emac%d: failed to register net device (%d)!\n",
2194                        dev->def->index, err);
2195                 goto out6;
2196         }
2197
2198         ocp_set_drvdata(ocpdev, dev);
2199
2200         printk("%s: emac%d, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
2201                ndev->name, dev->def->index,
2202                ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
2203                ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
2204
2205         if (dev->phy.address >= 0)
2206                 printk("%s: found %s PHY (0x%02x)\n", ndev->name,
2207                        dev->phy.def->name, dev->phy.address);
2208
2209         emac_dbg_register(dev->def->index, dev);
2210
2211         return 0;
2212       out6:
2213         iounmap((void *)dev->emacp);
2214       out5:
2215         tah_fini(dev->tah_dev);
2216       out4:
2217         rgmii_fini(dev->rgmii_dev, dev->rgmii_input);
2218       out3:
2219         zmii_fini(dev->zmii_dev, dev->zmii_input);
2220       out2:
2221         mal_unregister_commac(dev->mal, &dev->commac);
2222       out:
2223         kfree(ndev);
2224         return err;
2225 }
2226
/* OCP match table: bind to any IBM EMAC function.
 * The OCP_VENDOR_INVALID entry terminates the list.
 */
static struct ocp_device_id emac_ids[] = {
	{ .vendor = OCP_VENDOR_IBM, .function = OCP_FUNC_EMAC },
	{ .vendor = OCP_VENDOR_INVALID}	/* terminator */
};
2231
/* OCP driver glue: name, match table and probe/remove entry points */
static struct ocp_driver emac_driver = {
	.name = "emac",
	.id_table = emac_ids,
	.probe = emac_probe,
	.remove = emac_remove,
};
2238
/* Module entry point: bring up the MAL layer, then register the EMAC
 * OCP driver. Probing runs between EMAC_CLK_INTERNAL and
 * EMAC_CLK_EXTERNAL (platform-defined macros; presumably the internal
 * clock source is needed while devices are probed — confirm against
 * the board support code).
 */
static int __init emac_init(void)
{
	printk(KERN_INFO DRV_DESC ", version " DRV_VERSION "\n");

	DBG(": init" NL);

	if (mal_init())
		return -ENODEV;

	EMAC_CLK_INTERNAL;
	if (ocp_register_driver(&emac_driver)) {
		/* NOTE(review): unregistering after a failed register looks
		 * odd — verify the OCP API requires this to clean up a
		 * partial registration.
		 */
		EMAC_CLK_EXTERNAL;
		ocp_unregister_driver(&emac_driver);
		mal_exit();
		return -ENODEV;
	}
	EMAC_CLK_EXTERNAL;

	emac_init_debug();
	return 0;
}
2260
/* Module exit: tear down in reverse order of emac_init() */
static void __exit emac_exit(void)
{
	DBG(": exit" NL);
	ocp_unregister_driver(&emac_driver);
	mal_exit();
	emac_fini_debug();
}
2268
/* Standard module entry/exit registration */
module_init(emac_init);
module_exit(emac_exit);