lan78xx: Avoid spurious kevent 4 "error"
sfrench/cifs-2.6.git: drivers/net/usb/lan78xx.c
1 /*
2  * Copyright (C) 2015 Microchip Technology
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public License
6  * as published by the Free Software Foundation; either version 2
7  * of the License, or (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <linux/phy.h>
33 #include <net/ip6_checksum.h>
34 #include <linux/interrupt.h>
35 #include <linux/irqdomain.h>
36 #include <linux/irq.h>
37 #include <linux/irqchip/chained_irq.h>
38 #include <linux/microchipphy.h>
40 #include "lan78xx.h"
41
42 #define DRIVER_AUTHOR   "WOOJUNG HUH <woojung.huh@microchip.com>"
43 #define DRIVER_DESC     "LAN78XX USB 3.0 Gigabit Ethernet Devices"
44 #define DRIVER_NAME     "lan78xx"
45 #define DRIVER_VERSION  "1.0.6"
46
47 #define TX_TIMEOUT_JIFFIES              (5 * HZ)
48 #define THROTTLE_JIFFIES                (HZ / 8)
49 #define UNLINK_TIMEOUT_MS               3
50
51 #define RX_MAX_QUEUE_MEMORY             (60 * 1518)
52
53 #define SS_USB_PKT_SIZE                 (1024)
54 #define HS_USB_PKT_SIZE                 (512)
55 #define FS_USB_PKT_SIZE                 (64)
56
57 #define MAX_RX_FIFO_SIZE                (12 * 1024)
58 #define MAX_TX_FIFO_SIZE                (12 * 1024)
59 #define DEFAULT_BURST_CAP_SIZE          (MAX_TX_FIFO_SIZE)
60 #define DEFAULT_BULK_IN_DELAY           (0x0800)
61 #define MAX_SINGLE_PACKET_SIZE          (9000)
62 #define DEFAULT_TX_CSUM_ENABLE          (true)
63 #define DEFAULT_RX_CSUM_ENABLE          (true)
64 #define DEFAULT_TSO_CSUM_ENABLE         (true)
65 #define DEFAULT_VLAN_FILTER_ENABLE      (true)
66 #define TX_OVERHEAD                     (8)
67 #define RXW_PADDING                     2
68
69 #define LAN78XX_USB_VENDOR_ID           (0x0424)
70 #define LAN7800_USB_PRODUCT_ID          (0x7800)
71 #define LAN7850_USB_PRODUCT_ID          (0x7850)
72 #define LAN7801_USB_PRODUCT_ID          (0x7801)
73 #define LAN78XX_EEPROM_MAGIC            (0x78A5)
74 #define LAN78XX_OTP_MAGIC               (0x78F3)
75
76 #define MII_READ                        1
77 #define MII_WRITE                       0
78
79 #define EEPROM_INDICATOR                (0xA5)
80 #define EEPROM_MAC_OFFSET               (0x01)
81 #define MAX_EEPROM_SIZE                 512
82 #define OTP_INDICATOR_1                 (0xF3)
83 #define OTP_INDICATOR_2                 (0xF7)
84
85 #define WAKE_ALL                        (WAKE_PHY | WAKE_UCAST | \
86                                          WAKE_MCAST | WAKE_BCAST | \
87                                          WAKE_ARP | WAKE_MAGIC)
88
89 /* USB related defines */
90 #define BULK_IN_PIPE                    1
91 #define BULK_OUT_PIPE                   2
92
93 /* default autosuspend delay (mSec)*/
94 #define DEFAULT_AUTOSUSPEND_DELAY       (10 * 1000)
95
96 /* statistic update interval (mSec) */
97 #define STAT_UPDATE_TIMER               (1 * 1000)
98
99 /* defines interrupts from interrupt EP */
100 #define MAX_INT_EP                      (32)
101 #define INT_EP_INTEP                    (31)
102 #define INT_EP_OTP_WR_DONE              (28)
103 #define INT_EP_EEE_TX_LPI_START         (26)
104 #define INT_EP_EEE_TX_LPI_STOP          (25)
105 #define INT_EP_EEE_RX_LPI               (24)
106 #define INT_EP_MAC_RESET_TIMEOUT        (23)
107 #define INT_EP_RDFO                     (22)
108 #define INT_EP_TXE                      (21)
109 #define INT_EP_USB_STATUS               (20)
110 #define INT_EP_TX_DIS                   (19)
111 #define INT_EP_RX_DIS                   (18)
112 #define INT_EP_PHY                      (17)
113 #define INT_EP_DP                       (16)
114 #define INT_EP_MAC_ERR                  (15)
115 #define INT_EP_TDFU                     (14)
116 #define INT_EP_TDFO                     (13)
117 #define INT_EP_UTX                      (12)
118 #define INT_EP_GPIO_11                  (11)
119 #define INT_EP_GPIO_10                  (10)
120 #define INT_EP_GPIO_9                   (9)
121 #define INT_EP_GPIO_8                   (8)
122 #define INT_EP_GPIO_7                   (7)
123 #define INT_EP_GPIO_6                   (6)
124 #define INT_EP_GPIO_5                   (5)
125 #define INT_EP_GPIO_4                   (4)
126 #define INT_EP_GPIO_3                   (3)
127 #define INT_EP_GPIO_2                   (2)
128 #define INT_EP_GPIO_1                   (1)
129 #define INT_EP_GPIO_0                   (0)
130
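/* Illustrative note: the INT_EP_* values above read as bit positions in the
 * 32-bit status word delivered by the interrupt endpoint, so a PHY event
 * corresponds roughly to "intdata & BIT(INT_EP_PHY)"; compare the
 * INT_ENP_PHY_INT test in lan78xx_status() further down.  Treat this as an
 * interpretation of the defines, not a datasheet statement.
 */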
131 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
132         "RX FCS Errors",
133         "RX Alignment Errors",
134         "Rx Fragment Errors",
135         "RX Jabber Errors",
136         "RX Undersize Frame Errors",
137         "RX Oversize Frame Errors",
138         "RX Dropped Frames",
139         "RX Unicast Byte Count",
140         "RX Broadcast Byte Count",
141         "RX Multicast Byte Count",
142         "RX Unicast Frames",
143         "RX Broadcast Frames",
144         "RX Multicast Frames",
145         "RX Pause Frames",
146         "RX 64 Byte Frames",
147         "RX 65 - 127 Byte Frames",
148         "RX 128 - 255 Byte Frames",
149         "RX 256 - 511 Bytes Frames",
150         "RX 512 - 1023 Byte Frames",
151         "RX 1024 - 1518 Byte Frames",
152         "RX Greater 1518 Byte Frames",
153         "EEE RX LPI Transitions",
154         "EEE RX LPI Time",
155         "TX FCS Errors",
156         "TX Excess Deferral Errors",
157         "TX Carrier Errors",
158         "TX Bad Byte Count",
159         "TX Single Collisions",
160         "TX Multiple Collisions",
161         "TX Excessive Collision",
162         "TX Late Collisions",
163         "TX Unicast Byte Count",
164         "TX Broadcast Byte Count",
165         "TX Multicast Byte Count",
166         "TX Unicast Frames",
167         "TX Broadcast Frames",
168         "TX Multicast Frames",
169         "TX Pause Frames",
170         "TX 64 Byte Frames",
171         "TX 65 - 127 Byte Frames",
172         "TX 128 - 255 Byte Frames",
173         "TX 256 - 511 Bytes Frames",
174         "TX 512 - 1023 Byte Frames",
175         "TX 1024 - 1518 Byte Frames",
176         "TX Greater 1518 Byte Frames",
177         "EEE TX LPI Transitions",
178         "EEE TX LPI Time",
179 };
180
181 struct lan78xx_statstage {
182         u32 rx_fcs_errors;
183         u32 rx_alignment_errors;
184         u32 rx_fragment_errors;
185         u32 rx_jabber_errors;
186         u32 rx_undersize_frame_errors;
187         u32 rx_oversize_frame_errors;
188         u32 rx_dropped_frames;
189         u32 rx_unicast_byte_count;
190         u32 rx_broadcast_byte_count;
191         u32 rx_multicast_byte_count;
192         u32 rx_unicast_frames;
193         u32 rx_broadcast_frames;
194         u32 rx_multicast_frames;
195         u32 rx_pause_frames;
196         u32 rx_64_byte_frames;
197         u32 rx_65_127_byte_frames;
198         u32 rx_128_255_byte_frames;
199         u32 rx_256_511_bytes_frames;
200         u32 rx_512_1023_byte_frames;
201         u32 rx_1024_1518_byte_frames;
202         u32 rx_greater_1518_byte_frames;
203         u32 eee_rx_lpi_transitions;
204         u32 eee_rx_lpi_time;
205         u32 tx_fcs_errors;
206         u32 tx_excess_deferral_errors;
207         u32 tx_carrier_errors;
208         u32 tx_bad_byte_count;
209         u32 tx_single_collisions;
210         u32 tx_multiple_collisions;
211         u32 tx_excessive_collision;
212         u32 tx_late_collisions;
213         u32 tx_unicast_byte_count;
214         u32 tx_broadcast_byte_count;
215         u32 tx_multicast_byte_count;
216         u32 tx_unicast_frames;
217         u32 tx_broadcast_frames;
218         u32 tx_multicast_frames;
219         u32 tx_pause_frames;
220         u32 tx_64_byte_frames;
221         u32 tx_65_127_byte_frames;
222         u32 tx_128_255_byte_frames;
223         u32 tx_256_511_bytes_frames;
224         u32 tx_512_1023_byte_frames;
225         u32 tx_1024_1518_byte_frames;
226         u32 tx_greater_1518_byte_frames;
227         u32 eee_tx_lpi_transitions;
228         u32 eee_tx_lpi_time;
229 };
230
231 struct lan78xx_statstage64 {
232         u64 rx_fcs_errors;
233         u64 rx_alignment_errors;
234         u64 rx_fragment_errors;
235         u64 rx_jabber_errors;
236         u64 rx_undersize_frame_errors;
237         u64 rx_oversize_frame_errors;
238         u64 rx_dropped_frames;
239         u64 rx_unicast_byte_count;
240         u64 rx_broadcast_byte_count;
241         u64 rx_multicast_byte_count;
242         u64 rx_unicast_frames;
243         u64 rx_broadcast_frames;
244         u64 rx_multicast_frames;
245         u64 rx_pause_frames;
246         u64 rx_64_byte_frames;
247         u64 rx_65_127_byte_frames;
248         u64 rx_128_255_byte_frames;
249         u64 rx_256_511_bytes_frames;
250         u64 rx_512_1023_byte_frames;
251         u64 rx_1024_1518_byte_frames;
252         u64 rx_greater_1518_byte_frames;
253         u64 eee_rx_lpi_transitions;
254         u64 eee_rx_lpi_time;
255         u64 tx_fcs_errors;
256         u64 tx_excess_deferral_errors;
257         u64 tx_carrier_errors;
258         u64 tx_bad_byte_count;
259         u64 tx_single_collisions;
260         u64 tx_multiple_collisions;
261         u64 tx_excessive_collision;
262         u64 tx_late_collisions;
263         u64 tx_unicast_byte_count;
264         u64 tx_broadcast_byte_count;
265         u64 tx_multicast_byte_count;
266         u64 tx_unicast_frames;
267         u64 tx_broadcast_frames;
268         u64 tx_multicast_frames;
269         u64 tx_pause_frames;
270         u64 tx_64_byte_frames;
271         u64 tx_65_127_byte_frames;
272         u64 tx_128_255_byte_frames;
273         u64 tx_256_511_bytes_frames;
274         u64 tx_512_1023_byte_frames;
275         u64 tx_1024_1518_byte_frames;
276         u64 tx_greater_1518_byte_frames;
277         u64 eee_tx_lpi_transitions;
278         u64 eee_tx_lpi_time;
279 };
280
281 struct lan78xx_net;
282
283 struct lan78xx_priv {
284         struct lan78xx_net *dev;
285         u32 rfe_ctl;
286         u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
287         u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
288         u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
289         struct mutex dataport_mutex; /* for dataport access */
290         spinlock_t rfe_ctl_lock; /* for rfe register access */
291         struct work_struct set_multicast;
292         struct work_struct set_vlan;
293         u32 wol;
294 };
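/* Note: throughout this file the driver retrieves this structure with
 * "(struct lan78xx_priv *)(dev->data[0])", i.e. data[0] of struct lan78xx_net
 * (declared below) carries the pointer to this per-device private data.
 */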
295
296 enum skb_state {
297         illegal = 0,
298         tx_start,
299         tx_done,
300         rx_start,
301         rx_done,
302         rx_cleanup,
303         unlink_start
304 };
305
306 struct skb_data {               /* skb->cb is one of these */
307         struct urb *urb;
308         struct lan78xx_net *dev;
309         enum skb_state state;
310         size_t length;
311         int num_of_packet;
312 };
313
314 struct usb_context {
315         struct usb_ctrlrequest req;
316         struct lan78xx_net *dev;
317 };
318
319 #define EVENT_TX_HALT                   0
320 #define EVENT_RX_HALT                   1
321 #define EVENT_RX_MEMORY                 2
322 #define EVENT_STS_SPLIT                 3
323 #define EVENT_LINK_RESET                4
324 #define EVENT_RX_PAUSED                 5
325 #define EVENT_DEV_WAKING                6
326 #define EVENT_DEV_ASLEEP                7
327 #define EVENT_DEV_OPEN                  8
328 #define EVENT_STAT_UPDATE               9
329
330 struct statstage {
331         struct mutex                    access_lock;    /* for stats access */
332         struct lan78xx_statstage        saved;
333         struct lan78xx_statstage        rollover_count;
334         struct lan78xx_statstage        rollover_max;
335         struct lan78xx_statstage64      curr_stat;
336 };
337
338 struct irq_domain_data {
339         struct irq_domain       *irqdomain;
340         unsigned int            phyirq;
341         struct irq_chip         *irqchip;
342         irq_flow_handler_t      irq_handler;
343         u32                     irqenable;
344         struct mutex            irq_lock;               /* for irq bus access */
345 };
346
347 struct lan78xx_net {
348         struct net_device       *net;
349         struct usb_device       *udev;
350         struct usb_interface    *intf;
351         void                    *driver_priv;
352
353         int                     rx_qlen;
354         int                     tx_qlen;
355         struct sk_buff_head     rxq;
356         struct sk_buff_head     txq;
357         struct sk_buff_head     done;
358         struct sk_buff_head     rxq_pause;
359         struct sk_buff_head     txq_pend;
360
361         struct tasklet_struct   bh;
362         struct delayed_work     wq;
363
364         struct usb_host_endpoint *ep_blkin;
365         struct usb_host_endpoint *ep_blkout;
366         struct usb_host_endpoint *ep_intr;
367
368         int                     msg_enable;
369
370         struct urb              *urb_intr;
371         struct usb_anchor       deferred;
372
373         struct mutex            phy_mutex; /* for phy access */
374         unsigned                pipe_in, pipe_out, pipe_intr;
375
376         u32                     hard_mtu;       /* count any extra framing */
377         size_t                  rx_urb_size;    /* size for rx urbs */
378
379         unsigned long           flags;
380
381         wait_queue_head_t       *wait;
382         unsigned char           suspend_count;
383
384         unsigned                maxpacket;
385         struct timer_list       delay;
386         struct timer_list       stat_monitor;
387
388         unsigned long           data[5];
389
390         int                     link_on;
391         u8                      mdix_ctrl;
392
393         u32                     chipid;
394         u32                     chiprev;
395         struct mii_bus          *mdiobus;
396         phy_interface_t         interface;
397
398         int                     fc_autoneg;
399         u8                      fc_request_control;
400
401         int                     delta;
402         struct statstage        stats;
403
404         struct irq_domain_data  domain_data;
405 };
406
407 /* define external phy id */
408 #define PHY_LAN8835                     (0x0007C130)
409 #define PHY_KSZ9031RNX                  (0x00221620)
410
411 /* use ethtool to change the level for any given device */
412 static int msg_level = -1;
413 module_param(msg_level, int, 0);
414 MODULE_PARM_DESC(msg_level, "Override default message level");
415
416 static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
417 {
418         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
419         int ret;
420
421         if (!buf)
422                 return -ENOMEM;
423
424         ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
425                               USB_VENDOR_REQUEST_READ_REGISTER,
426                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
427                               0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
428         if (likely(ret >= 0)) {
429                 le32_to_cpus(buf);
430                 *data = *buf;
431         } else {
432                 netdev_warn(dev->net,
433                             "Failed to read register index 0x%08x. ret = %d",
434                             index, ret);
435         }
436
437         kfree(buf);
438
439         return ret;
440 }
441
442 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
443 {
444         u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
445         int ret;
446
447         if (!buf)
448                 return -ENOMEM;
449
450         *buf = data;
451         cpu_to_le32s(buf);
452
453         ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
454                               USB_VENDOR_REQUEST_WRITE_REGISTER,
455                               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
456                               0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
457         if (unlikely(ret < 0)) {
458                 netdev_warn(dev->net,
459                             "Failed to write register index 0x%08x. ret = %d",
460                             index, ret);
461         }
462
463         kfree(buf);
464
465         return ret;
466 }
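/* Illustrative sketch, not part of the driver: a read-modify-write helper
 * layered on lan78xx_read_reg()/lan78xx_write_reg() above.  The helper name
 * and the mask/set convention are hypothetical; the register accessors and
 * their error handling are the real ones defined above.
 */
static int __maybe_unused lan78xx_rmw_reg_sketch(struct lan78xx_net *dev,
                                                 u32 index, u32 mask, u32 set)
{
        u32 val;
        int ret;

        ret = lan78xx_read_reg(dev, index, &val);
        if (ret < 0)
                return ret;

        val &= ~mask;
        val |= set;

        return lan78xx_write_reg(dev, index, val);
}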
467
468 static int lan78xx_read_stats(struct lan78xx_net *dev,
469                               struct lan78xx_statstage *data)
470 {
471         int ret = 0;
472         int i;
473         struct lan78xx_statstage *stats;
474         u32 *src;
475         u32 *dst;
476
477         stats = kmalloc(sizeof(*stats), GFP_KERNEL);
478         if (!stats)
479                 return -ENOMEM;
480
481         ret = usb_control_msg(dev->udev,
482                               usb_rcvctrlpipe(dev->udev, 0),
483                               USB_VENDOR_REQUEST_GET_STATS,
484                               USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
485                               0,
486                               0,
487                               (void *)stats,
488                               sizeof(*stats),
489                               USB_CTRL_SET_TIMEOUT);
490         if (likely(ret >= 0)) {
491                 src = (u32 *)stats;
492                 dst = (u32 *)data;
493                 for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
494                         le32_to_cpus(&src[i]);
495                         dst[i] = src[i];
496                 }
497         } else {
498                 netdev_warn(dev->net,
499                             "Failed to read stat ret = 0x%x", ret);
500         }
501
502         kfree(stats);
503
504         return ret;
505 }
506
507 #define check_counter_rollover(struct1, dev_stats, member) {    \
508         if (struct1->member < dev_stats.saved.member)           \
509                 dev_stats.rollover_count.member++;              \
510         }
511
512 static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
513                                         struct lan78xx_statstage *stats)
514 {
515         check_counter_rollover(stats, dev->stats, rx_fcs_errors);
516         check_counter_rollover(stats, dev->stats, rx_alignment_errors);
517         check_counter_rollover(stats, dev->stats, rx_fragment_errors);
518         check_counter_rollover(stats, dev->stats, rx_jabber_errors);
519         check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
520         check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
521         check_counter_rollover(stats, dev->stats, rx_dropped_frames);
522         check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
523         check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
524         check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
525         check_counter_rollover(stats, dev->stats, rx_unicast_frames);
526         check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
527         check_counter_rollover(stats, dev->stats, rx_multicast_frames);
528         check_counter_rollover(stats, dev->stats, rx_pause_frames);
529         check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
530         check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
531         check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
532         check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
533         check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
534         check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
535         check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
536         check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
537         check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
538         check_counter_rollover(stats, dev->stats, tx_fcs_errors);
539         check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
540         check_counter_rollover(stats, dev->stats, tx_carrier_errors);
541         check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
542         check_counter_rollover(stats, dev->stats, tx_single_collisions);
543         check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
544         check_counter_rollover(stats, dev->stats, tx_excessive_collision);
545         check_counter_rollover(stats, dev->stats, tx_late_collisions);
546         check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
547         check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
548         check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
549         check_counter_rollover(stats, dev->stats, tx_unicast_frames);
550         check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
551         check_counter_rollover(stats, dev->stats, tx_multicast_frames);
552         check_counter_rollover(stats, dev->stats, tx_pause_frames);
553         check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
554         check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
555         check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
556         check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
557         check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
558         check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
559         check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
560         check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
561         check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);
562
563         memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
564 }
565
566 static void lan78xx_update_stats(struct lan78xx_net *dev)
567 {
568         u32 *p, *count, *max;
569         u64 *data;
570         int i;
571         struct lan78xx_statstage lan78xx_stats;
572
573         if (usb_autopm_get_interface(dev->intf) < 0)
574                 return;
575
576         p = (u32 *)&lan78xx_stats;
577         count = (u32 *)&dev->stats.rollover_count;
578         max = (u32 *)&dev->stats.rollover_max;
579         data = (u64 *)&dev->stats.curr_stat;
580
581         mutex_lock(&dev->stats.access_lock);
582
583         if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
584                 lan78xx_check_stat_rollover(dev, &lan78xx_stats);
585
586         for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
587                 data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));
588
589         mutex_unlock(&dev->stats.access_lock);
590
591         usb_autopm_put_interface(dev->intf);
592 }
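/* Worked example of the accumulation above (illustrative): assuming a counter
 * for which rollover_max is 0xFFFFFFFF (a full 32-bit counter), a raw reading
 * of 5 after one wrap (rollover_count == 1) yields
 * curr_stat = 5 + 1 * (0xFFFFFFFF + 1) = 5 + 2^32.  The actual counter widths
 * come from rollover_max, which is set up elsewhere in the driver (not shown
 * in this hunk).
 */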
593
594 /* Loop until the read is completed, with timeout; called with phy_mutex held */
595 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
596 {
597         unsigned long start_time = jiffies;
598         u32 val;
599         int ret;
600
601         do {
602                 ret = lan78xx_read_reg(dev, MII_ACC, &val);
603                 if (unlikely(ret < 0))
604                         return -EIO;
605
606                 if (!(val & MII_ACC_MII_BUSY_))
607                         return 0;
608         } while (!time_after(jiffies, start_time + HZ));
609
610         return -EIO;
611 }
612
613 static inline u32 mii_access(int id, int index, int read)
614 {
615         u32 ret;
616
617         ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
618         ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
619         if (read)
620                 ret |= MII_ACC_MII_READ_;
621         else
622                 ret |= MII_ACC_MII_WRITE_;
623         ret |= MII_ACC_MII_BUSY_;
624
625         return ret;
626 }
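/* Illustrative usage sketch (assumes the MII_DATA register define from
 * lan78xx.h and the MDIO bus handlers elsewhere in this file): a PHY register
 * read built on mii_access() would look roughly like
 *
 *	lan78xx_write_reg(dev, MII_ACC, mii_access(phy_id, reg, MII_READ));
 *	lan78xx_phy_wait_not_busy(dev);
 *	lan78xx_read_reg(dev, MII_DATA, &val);
 *
 * with phy_mutex held, as the comment above lan78xx_phy_wait_not_busy()
 * requires.
 */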
627
628 static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
629 {
630         unsigned long start_time = jiffies;
631         u32 val;
632         int ret;
633
634         do {
635                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
636                 if (unlikely(ret < 0))
637                         return -EIO;
638
639                 if (!(val & E2P_CMD_EPC_BUSY_) ||
640                     (val & E2P_CMD_EPC_TIMEOUT_))
641                         break;
642                 usleep_range(40, 100);
643         } while (!time_after(jiffies, start_time + HZ));
644
645         if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
646                 netdev_warn(dev->net, "EEPROM read operation timeout");
647                 return -EIO;
648         }
649
650         return 0;
651 }
652
653 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
654 {
655         unsigned long start_time = jiffies;
656         u32 val;
657         int ret;
658
659         do {
660                 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
661                 if (unlikely(ret < 0))
662                         return -EIO;
663
664                 if (!(val & E2P_CMD_EPC_BUSY_))
665                         return 0;
666
667                 usleep_range(40, 100);
668         } while (!time_after(jiffies, start_time + HZ));
669
670         netdev_warn(dev->net, "EEPROM is busy");
671         return -EIO;
672 }
673
674 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
675                                    u32 length, u8 *data)
676 {
677         u32 val;
678         u32 saved;
679         int i, ret;
680         int retval;
681
682         /* Depending on the chip, some EEPROM pins are muxed with the LED
683          * function; disable and restore the LED function to access the EEPROM.
684          */
685         ret = lan78xx_read_reg(dev, HW_CFG, &val);
686         saved = val;
687         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
688                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
689                 ret = lan78xx_write_reg(dev, HW_CFG, val);
690         }
691
692         retval = lan78xx_eeprom_confirm_not_busy(dev);
693         if (retval)
694                 return retval;
695
696         for (i = 0; i < length; i++) {
697                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
698                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
699                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
700                 if (unlikely(ret < 0)) {
701                         retval = -EIO;
702                         goto exit;
703                 }
704
705                 retval = lan78xx_wait_eeprom(dev);
706                 if (retval < 0)
707                         goto exit;
708
709                 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
710                 if (unlikely(ret < 0)) {
711                         retval = -EIO;
712                         goto exit;
713                 }
714
715                 data[i] = val & 0xFF;
716                 offset++;
717         }
718
719         retval = 0;
720 exit:
721         if (dev->chipid == ID_REV_CHIP_ID_7800_)
722                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
723
724         return retval;
725 }
726
727 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
728                                u32 length, u8 *data)
729 {
730         u8 sig;
731         int ret;
732
733         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
734         if ((ret == 0) && (sig == EEPROM_INDICATOR))
735                 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
736         else
737                 ret = -EINVAL;
738
739         return ret;
740 }
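/* Illustrative sketch, not part of the driver: fetching the factory MAC
 * address through lan78xx_read_eeprom() and the EEPROM_MAC_OFFSET define
 * above.  The helper name is hypothetical; the driver's real MAC address
 * setup lives elsewhere in this file.
 */
static int __maybe_unused lan78xx_eeprom_mac_sketch(struct lan78xx_net *dev,
                                                    u8 mac[ETH_ALEN])
{
        /* lan78xx_read_eeprom() checks EEPROM_INDICATOR before reading */
        return lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, mac);
}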
741
742 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
743                                     u32 length, u8 *data)
744 {
745         u32 val;
746         u32 saved;
747         int i, ret;
748         int retval;
749
750         /* Depending on the chip, some EEPROM pins are muxed with the LED
751          * function; disable and restore the LED function to access the EEPROM.
752          */
753         ret = lan78xx_read_reg(dev, HW_CFG, &val);
754         saved = val;
755         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
756                 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
757                 ret = lan78xx_write_reg(dev, HW_CFG, val);
758         }
759
760         retval = lan78xx_eeprom_confirm_not_busy(dev);
761         if (retval)
762                 goto exit;
763
764         /* Issue write/erase enable command */
765         val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
766         ret = lan78xx_write_reg(dev, E2P_CMD, val);
767         if (unlikely(ret < 0)) {
768                 retval = -EIO;
769                 goto exit;
770         }
771
772         retval = lan78xx_wait_eeprom(dev);
773         if (retval < 0)
774                 goto exit;
775
776         for (i = 0; i < length; i++) {
777                 /* Fill data register */
778                 val = data[i];
779                 ret = lan78xx_write_reg(dev, E2P_DATA, val);
780                 if (ret < 0) {
781                         retval = -EIO;
782                         goto exit;
783                 }
784
785                 /* Send "write" command */
786                 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
787                 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
788                 ret = lan78xx_write_reg(dev, E2P_CMD, val);
789                 if (ret < 0) {
790                         retval = -EIO;
791                         goto exit;
792                 }
793
794                 retval = lan78xx_wait_eeprom(dev);
795                 if (retval < 0)
796                         goto exit;
797
798                 offset++;
799         }
800
801         retval = 0;
802 exit:
803         if (dev->chipid == ID_REV_CHIP_ID_7800_)
804                 ret = lan78xx_write_reg(dev, HW_CFG, saved);
805
806         return retval;
807 }
808
809 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
810                                 u32 length, u8 *data)
811 {
812         int i;
813         int ret;
814         u32 buf;
815         unsigned long timeout;
816
817         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
818
819         if (buf & OTP_PWR_DN_PWRDN_N_) {
820                 /* clear it and wait to be cleared */
821                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
822
823                 timeout = jiffies + HZ;
824                 do {
825                         usleep_range(1, 10);
826                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
827                         if (time_after(jiffies, timeout)) {
828                                 netdev_warn(dev->net,
829                                             "timeout on OTP_PWR_DN");
830                                 return -EIO;
831                         }
832                 } while (buf & OTP_PWR_DN_PWRDN_N_);
833         }
834
835         for (i = 0; i < length; i++) {
836                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
837                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
838                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
839                                         ((offset + i) & OTP_ADDR2_10_3));
840
841                 ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
842                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
843
844                 timeout = jiffies + HZ;
845                 do {
846                         udelay(1);
847                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
848                         if (time_after(jiffies, timeout)) {
849                                 netdev_warn(dev->net,
850                                             "timeout on OTP_STATUS");
851                                 return -EIO;
852                         }
853                 } while (buf & OTP_STATUS_BUSY_);
854
855                 ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
856
857                 data[i] = (u8)(buf & 0xFF);
858         }
859
860         return 0;
861 }
862
863 static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
864                                  u32 length, u8 *data)
865 {
866         int i;
867         int ret;
868         u32 buf;
869         unsigned long timeout;
870
871         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
872
873         if (buf & OTP_PWR_DN_PWRDN_N_) {
874                 /* clear it and wait to be cleared */
875                 ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
876
877                 timeout = jiffies + HZ;
878                 do {
879                         udelay(1);
880                         ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
881                         if (time_after(jiffies, timeout)) {
882                                 netdev_warn(dev->net,
883                                             "timeout on OTP_PWR_DN completion");
884                                 return -EIO;
885                         }
886                 } while (buf & OTP_PWR_DN_PWRDN_N_);
887         }
888
889         /* set to BYTE program mode */
890         ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
891
892         for (i = 0; i < length; i++) {
893                 ret = lan78xx_write_reg(dev, OTP_ADDR1,
894                                         ((offset + i) >> 8) & OTP_ADDR1_15_11);
895                 ret = lan78xx_write_reg(dev, OTP_ADDR2,
896                                         ((offset + i) & OTP_ADDR2_10_3));
897                 ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
898                 ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
899                 ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
900
901                 timeout = jiffies + HZ;
902                 do {
903                         udelay(1);
904                         ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
905                         if (time_after(jiffies, timeout)) {
906                                 netdev_warn(dev->net,
907                                             "Timeout on OTP_STATUS completion");
908                                 return -EIO;
909                         }
910                 } while (buf & OTP_STATUS_BUSY_);
911         }
912
913         return 0;
914 }
915
916 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
917                             u32 length, u8 *data)
918 {
919         u8 sig;
920         int ret;
921
922         ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
923
924         if (ret == 0) {
925                 if (sig == OTP_INDICATOR_2)
926                         offset += 0x100;
927                 else if (sig != OTP_INDICATOR_1)
928                         ret = -EINVAL;
931                 if (!ret)
932                         ret = lan78xx_read_raw_otp(dev, offset, length, data);
933         }
934
935         return ret;
936 }
937
938 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
939 {
940         int i, ret;
941
942         for (i = 0; i < 100; i++) {
943                 u32 dp_sel;
944
945                 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
946                 if (unlikely(ret < 0))
947                         return -EIO;
948
949                 if (dp_sel & DP_SEL_DPRDY_)
950                         return 0;
951
952                 usleep_range(40, 100);
953         }
954
955         netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
956
957         return -EIO;
958 }
959
960 static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
961                                   u32 addr, u32 length, u32 *buf)
962 {
963         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
964         u32 dp_sel;
965         int i, ret;
966
967         if (usb_autopm_get_interface(dev->intf) < 0)
968                 return 0;
969
970         mutex_lock(&pdata->dataport_mutex);
971
972         ret = lan78xx_dataport_wait_not_busy(dev);
973         if (ret < 0)
974                 goto done;
975
976         ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
977
978         dp_sel &= ~DP_SEL_RSEL_MASK_;
979         dp_sel |= ram_select;
980         ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);
981
982         for (i = 0; i < length; i++) {
983                 ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
984
985                 ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
986
987                 ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
988
989                 ret = lan78xx_dataport_wait_not_busy(dev);
990                 if (ret < 0)
991                         goto done;
992         }
993
994 done:
995         mutex_unlock(&pdata->dataport_mutex);
996         usb_autopm_put_interface(dev->intf);
997
998         return ret;
999 }
1000
1001 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1002                                     int index, u8 addr[ETH_ALEN])
1003 {
1004         u32     temp;
1005
1006         if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1007                 temp = addr[3];
1008                 temp = addr[2] | (temp << 8);
1009                 temp = addr[1] | (temp << 8);
1010                 temp = addr[0] | (temp << 8);
1011                 pdata->pfilter_table[index][1] = temp;
1012                 temp = addr[5];
1013                 temp = addr[4] | (temp << 8);
1014                 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1015                 pdata->pfilter_table[index][0] = temp;
1016         }
1017 }
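/* Worked example (illustrative): for addr = 00:11:22:33:44:55 the code above
 * stores pfilter_table[index][1] = 0x33221100 (addr[3..0]) and
 * pfilter_table[index][0] = MAF_HI_VALID_ | MAF_HI_TYPE_DST_ | 0x5544
 * (addr[5..4]); lan78xx_deferred_multicast_write() later writes these to
 * MAF_LO(index) and MAF_HI(index) respectively.
 */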
1018
1019 /* returns hash bit number for given MAC address */
1020 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1021 {
1022         return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1023 }
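/* Example (illustrative): the top nine bits of the CRC select one of 512 hash
 * bits; lan78xx_set_multicast() below sets bit (bitnum % 32) of
 * mchash_table[bitnum / 32] for multicast addresses beyond the 32 perfect
 * filter slots.
 */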
1024
1025 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1026 {
1027         struct lan78xx_priv *pdata =
1028                         container_of(param, struct lan78xx_priv, set_multicast);
1029         struct lan78xx_net *dev = pdata->dev;
1030         int i;
1031         int ret;
1032
1033         netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1034                   pdata->rfe_ctl);
1035
1036         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
1037                                DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1038
1039         for (i = 1; i < NUM_OF_MAF; i++) {
1040                 ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1041                 ret = lan78xx_write_reg(dev, MAF_LO(i),
1042                                         pdata->pfilter_table[i][1]);
1043                 ret = lan78xx_write_reg(dev, MAF_HI(i),
1044                                         pdata->pfilter_table[i][0]);
1045         }
1046
1047         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1048 }
1049
1050 static void lan78xx_set_multicast(struct net_device *netdev)
1051 {
1052         struct lan78xx_net *dev = netdev_priv(netdev);
1053         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1054         unsigned long flags;
1055         int i;
1056
1057         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1058
1059         pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1060                             RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1061
1062         for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1063                 pdata->mchash_table[i] = 0;
1064         /* pfilter_table[0] has own HW address */
1065         for (i = 1; i < NUM_OF_MAF; i++) {
1066                 pdata->pfilter_table[i][0] =
1067                         pdata->pfilter_table[i][1] = 0;
1068         }
1069
1070         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1071
1072         if (dev->net->flags & IFF_PROMISC) {
1073                 netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1074                 pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1075         } else {
1076                 if (dev->net->flags & IFF_ALLMULTI) {
1077                         netif_dbg(dev, drv, dev->net,
1078                                   "receive all multicast enabled");
1079                         pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1080                 }
1081         }
1082
1083         if (netdev_mc_count(dev->net)) {
1084                 struct netdev_hw_addr *ha;
1085                 int i;
1086
1087                 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1088
1089                 pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1090
1091                 i = 1;
1092                 netdev_for_each_mc_addr(ha, netdev) {
1093                         /* set first 32 into Perfect Filter */
1094                         if (i < 33) {
1095                                 lan78xx_set_addr_filter(pdata, i, ha->addr);
1096                         } else {
1097                                 u32 bitnum = lan78xx_hash(ha->addr);
1098
1099                                 pdata->mchash_table[bitnum / 32] |=
1100                                                         (1 << (bitnum % 32));
1101                                 pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1102                         }
1103                         i++;
1104                 }
1105         }
1106
1107         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1108
1109         /* defer register writes to a sleepable context */
1110         schedule_work(&pdata->set_multicast);
1111 }
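/* Note: as the comment at the schedule_work() call says, the RFE_CTL/MAF
 * register writes must happen in a sleepable context (they are USB control
 * transfers), so this function only updates the shadow tables under
 * rfe_ctl_lock and lan78xx_deferred_multicast_write() pushes them to the
 * chip from the workqueue.
 */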
1112
1113 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1114                                       u16 lcladv, u16 rmtadv)
1115 {
1116         u32 flow = 0, fct_flow = 0;
1117         int ret;
1118         u8 cap;
1119
1120         if (dev->fc_autoneg)
1121                 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1122         else
1123                 cap = dev->fc_request_control;
1124
1125         if (cap & FLOW_CTRL_TX)
1126                 flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);
1127
1128         if (cap & FLOW_CTRL_RX)
1129                 flow |= FLOW_CR_RX_FCEN_;
1130
1131         if (dev->udev->speed == USB_SPEED_SUPER)
1132                 fct_flow = 0x817;
1133         else if (dev->udev->speed == USB_SPEED_HIGH)
1134                 fct_flow = 0x211;
1135
1136         netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1137                   (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1138                   (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1139
1140         ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
1141
1142         /* threshold value should be set before enabling flow */
1143         ret = lan78xx_write_reg(dev, FLOW, flow);
1144
1145         return 0;
1146 }
1147
1148 static int lan78xx_link_reset(struct lan78xx_net *dev)
1149 {
1150         struct phy_device *phydev = dev->net->phydev;
1151         struct ethtool_link_ksettings ecmd;
1152         int ladv, radv, ret;
1153         u32 buf;
1154
1155         /* clear LAN78xx interrupt status */
1156         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1157         if (unlikely(ret < 0))
1158                 return -EIO;
1159
1160         phy_read_status(phydev);
1161
1162         if (!phydev->link && dev->link_on) {
1163                 dev->link_on = false;
1164
1165                 /* reset MAC */
1166                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1167                 if (unlikely(ret < 0))
1168                         return -EIO;
1169                 buf |= MAC_CR_RST_;
1170                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1171                 if (unlikely(ret < 0))
1172                         return -EIO;
1173
1174                 del_timer(&dev->stat_monitor);
1175         } else if (phydev->link && !dev->link_on) {
1176                 dev->link_on = true;
1177
1178                 phy_ethtool_ksettings_get(phydev, &ecmd);
1179
1180                 if (dev->udev->speed == USB_SPEED_SUPER) {
1181                         if (ecmd.base.speed == 1000) {
1182                                 /* disable U2 */
1183                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1184                                 buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
1185                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1186                                 /* enable U1 */
1187                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1188                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1189                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1190                         } else {
1191                                 /* enable U1 & U2 */
1192                                 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
1193                                 buf |= USB_CFG1_DEV_U2_INIT_EN_;
1194                                 buf |= USB_CFG1_DEV_U1_INIT_EN_;
1195                                 ret = lan78xx_write_reg(dev, USB_CFG1, buf);
1196                         }
1197                 }
1198
1199                 ladv = phy_read(phydev, MII_ADVERTISE);
1200                 if (ladv < 0)
1201                         return ladv;
1202
1203                 radv = phy_read(phydev, MII_LPA);
1204                 if (radv < 0)
1205                         return radv;
1206
1207                 netif_dbg(dev, link, dev->net,
1208                           "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1209                           ecmd.base.speed, ecmd.base.duplex, ladv, radv);
1210
1211                 ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
1212                                                  radv);
1213
1214                 if (!timer_pending(&dev->stat_monitor)) {
1215                         dev->delta = 1;
1216                         mod_timer(&dev->stat_monitor,
1217                                   jiffies + STAT_UPDATE_TIMER);
1218                 }
1219         }
1220
1221         return ret;
1222 }
1223
1224 /* Some work can't be done in tasklets, so we use keventd.
1225  *
1226  * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1227  * but tasklet_schedule() doesn't.  Hope the failure is rare.
1228  */
1229 static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
1230 {
1231         set_bit(work, &dev->flags);
1232         if (!schedule_delayed_work(&dev->wq, 0))
1233                 netdev_err(dev->net, "kevent %d may have been dropped\n", work);
1234 }
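/* Note: with the EVENT_* defines above, "kevent 4" in the message above is
 * EVENT_LINK_RESET.  schedule_delayed_work() returns false when the work item
 * is already queued, so the message can appear even though no event is lost;
 * that benign case is what the patch subject ("Avoid spurious kevent 4
 * 'error'") refers to.
 */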
1235
1236 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1237 {
1238         u32 intdata;
1239
1240         if (urb->actual_length != 4) {
1241                 netdev_warn(dev->net,
1242                             "unexpected urb length %d", urb->actual_length);
1243                 return;
1244         }
1245
1246         memcpy(&intdata, urb->transfer_buffer, 4);
1247         le32_to_cpus(&intdata);
1248
1249         if (intdata & INT_ENP_PHY_INT) {
1250                 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1251                 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1252
1253                 if (dev->domain_data.phyirq > 0)
1254                         generic_handle_irq(dev->domain_data.phyirq);
1255         } else
1256                 netdev_warn(dev->net,
1257                             "unexpected interrupt: 0x%08x\n", intdata);
1258 }
1259
1260 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1261 {
1262         return MAX_EEPROM_SIZE;
1263 }
1264
1265 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1266                                       struct ethtool_eeprom *ee, u8 *data)
1267 {
1268         struct lan78xx_net *dev = netdev_priv(netdev);
1269         int ret;
1270
1271         ret = usb_autopm_get_interface(dev->intf);
1272         if (ret)
1273                 return ret;
1274
1275         ee->magic = LAN78XX_EEPROM_MAGIC;
1276
1277         ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1278
1279         usb_autopm_put_interface(dev->intf);
1280
1281         return ret;
1282 }
1283
1284 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1285                                       struct ethtool_eeprom *ee, u8 *data)
1286 {
1287         struct lan78xx_net *dev = netdev_priv(netdev);
1288         int ret;
1289
1290         ret = usb_autopm_get_interface(dev->intf);
1291         if (ret)
1292                 return ret;
1293
1294         /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1295          * to load data from EEPROM
1296          */
1297         if (ee->magic == LAN78XX_EEPROM_MAGIC)
1298                 ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1299         else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1300                  (ee->offset == 0) &&
1301                  (ee->len == 512) &&
1302                  (data[0] == OTP_INDICATOR_1))
1303                 ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1304
1305         usb_autopm_put_interface(dev->intf);
1306
1307         return ret;
1308 }
1309
1310 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1311                                 u8 *data)
1312 {
1313         if (stringset == ETH_SS_STATS)
1314                 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1315 }
1316
1317 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1318 {
1319         if (sset == ETH_SS_STATS)
1320                 return ARRAY_SIZE(lan78xx_gstrings);
1321         else
1322                 return -EOPNOTSUPP;
1323 }
1324
1325 static void lan78xx_get_stats(struct net_device *netdev,
1326                               struct ethtool_stats *stats, u64 *data)
1327 {
1328         struct lan78xx_net *dev = netdev_priv(netdev);
1329
1330         lan78xx_update_stats(dev);
1331
1332         mutex_lock(&dev->stats.access_lock);
1333         memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1334         mutex_unlock(&dev->stats.access_lock);
1335 }
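/* Note: "ethtool -S" output relies on lan78xx_gstrings[] and struct
 * lan78xx_statstage64 staying in the same order, since the memcpy above
 * exports curr_stat as a flat u64 array matched to the string table
 * entry-for-entry.
 */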
1336
1337 static void lan78xx_get_wol(struct net_device *netdev,
1338                             struct ethtool_wolinfo *wol)
1339 {
1340         struct lan78xx_net *dev = netdev_priv(netdev);
1341         int ret;
1342         u32 buf;
1343         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1344
1345         if (usb_autopm_get_interface(dev->intf) < 0)
1346                 return;
1347
1348         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1349         if (unlikely(ret < 0)) {
1350                 wol->supported = 0;
1351                 wol->wolopts = 0;
1352         } else {
1353                 if (buf & USB_CFG_RMT_WKP_) {
1354                         wol->supported = WAKE_ALL;
1355                         wol->wolopts = pdata->wol;
1356                 } else {
1357                         wol->supported = 0;
1358                         wol->wolopts = 0;
1359                 }
1360         }
1361
1362         usb_autopm_put_interface(dev->intf);
1363 }
1364
1365 static int lan78xx_set_wol(struct net_device *netdev,
1366                            struct ethtool_wolinfo *wol)
1367 {
1368         struct lan78xx_net *dev = netdev_priv(netdev);
1369         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1370         int ret;
1371
1372         ret = usb_autopm_get_interface(dev->intf);
1373         if (ret < 0)
1374                 return ret;
1375
1376         pdata->wol = 0;
1377         if (wol->wolopts & WAKE_UCAST)
1378                 pdata->wol |= WAKE_UCAST;
1379         if (wol->wolopts & WAKE_MCAST)
1380                 pdata->wol |= WAKE_MCAST;
1381         if (wol->wolopts & WAKE_BCAST)
1382                 pdata->wol |= WAKE_BCAST;
1383         if (wol->wolopts & WAKE_MAGIC)
1384                 pdata->wol |= WAKE_MAGIC;
1385         if (wol->wolopts & WAKE_PHY)
1386                 pdata->wol |= WAKE_PHY;
1387         if (wol->wolopts & WAKE_ARP)
1388                 pdata->wol |= WAKE_ARP;
1389
1390         device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1391
1392         phy_ethtool_set_wol(netdev->phydev, wol);
1393
1394         usb_autopm_put_interface(dev->intf);
1395
1396         return ret;
1397 }
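/* Usage note (illustrative): the wolopts accepted above map to the standard
 * ethtool letters, e.g. "ethtool -s <iface> wol g" requests WAKE_MAGIC; the
 * chosen mask is kept in pdata->wol and is presumably applied to the chip in
 * the suspend path, which is not shown in this hunk.
 */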
1398
1399 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1400 {
1401         struct lan78xx_net *dev = netdev_priv(net);
1402         struct phy_device *phydev = net->phydev;
1403         int ret;
1404         u32 buf;
1405
1406         ret = usb_autopm_get_interface(dev->intf);
1407         if (ret < 0)
1408                 return ret;
1409
1410         ret = phy_ethtool_get_eee(phydev, edata);
1411         if (ret < 0)
1412                 goto exit;
1413
1414         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1415         if (buf & MAC_CR_EEE_EN_) {
1416                 edata->eee_enabled = true;
1417                 edata->eee_active = !!(edata->advertised &
1418                                        edata->lp_advertised);
1419                 edata->tx_lpi_enabled = true;
1420                 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1421                 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1422                 edata->tx_lpi_timer = buf;
1423         } else {
1424                 edata->eee_enabled = false;
1425                 edata->eee_active = false;
1426                 edata->tx_lpi_enabled = false;
1427                 edata->tx_lpi_timer = 0;
1428         }
1429
1430         ret = 0;
1431 exit:
1432         usb_autopm_put_interface(dev->intf);
1433
1434         return ret;
1435 }
1436
1437 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1438 {
1439         struct lan78xx_net *dev = netdev_priv(net);
1440         int ret;
1441         u32 buf;
1442
1443         ret = usb_autopm_get_interface(dev->intf);
1444         if (ret < 0)
1445                 return ret;
1446
1447         if (edata->eee_enabled) {
1448                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1449                 buf |= MAC_CR_EEE_EN_;
1450                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1451
1452                 phy_ethtool_set_eee(net->phydev, edata);
1453
1454                 buf = (u32)edata->tx_lpi_timer;
1455                 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1456         } else {
1457                 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1458                 buf &= ~MAC_CR_EEE_EN_;
1459                 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1460         }
1461
1462         usb_autopm_put_interface(dev->intf);
1463
1464         return 0;
1465 }
1466
1467 static u32 lan78xx_get_link(struct net_device *net)
1468 {
1469         phy_read_status(net->phydev);
1470
1471         return net->phydev->link;
1472 }
1473
1474 static void lan78xx_get_drvinfo(struct net_device *net,
1475                                 struct ethtool_drvinfo *info)
1476 {
1477         struct lan78xx_net *dev = netdev_priv(net);
1478
1479         strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1480         strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1481         usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1482 }
1483
1484 static u32 lan78xx_get_msglevel(struct net_device *net)
1485 {
1486         struct lan78xx_net *dev = netdev_priv(net);
1487
1488         return dev->msg_enable;
1489 }
1490
1491 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1492 {
1493         struct lan78xx_net *dev = netdev_priv(net);
1494
1495         dev->msg_enable = level;
1496 }
1497
1498 static int lan78xx_get_link_ksettings(struct net_device *net,
1499                                       struct ethtool_link_ksettings *cmd)
1500 {
1501         struct lan78xx_net *dev = netdev_priv(net);
1502         struct phy_device *phydev = net->phydev;
1503         int ret;
1504
1505         ret = usb_autopm_get_interface(dev->intf);
1506         if (ret < 0)
1507                 return ret;
1508
1509         phy_ethtool_ksettings_get(phydev, cmd);
1510
1511         usb_autopm_put_interface(dev->intf);
1512
1513         return ret;
1514 }
1515
1516 static int lan78xx_set_link_ksettings(struct net_device *net,
1517                                       const struct ethtool_link_ksettings *cmd)
1518 {
1519         struct lan78xx_net *dev = netdev_priv(net);
1520         struct phy_device *phydev = net->phydev;
1521         int ret = 0;
1522         int temp;
1523
1524         ret = usb_autopm_get_interface(dev->intf);
1525         if (ret < 0)
1526                 return ret;
1527
1528         /* change speed & duplex */
1529         ret = phy_ethtool_ksettings_set(phydev, cmd);
1530
1531         if (!cmd->base.autoneg) {
1532                 /* force link down */
1533                 temp = phy_read(phydev, MII_BMCR);
1534                 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1535                 mdelay(1);
1536                 phy_write(phydev, MII_BMCR, temp);
1537         }
1538
1539         usb_autopm_put_interface(dev->intf);
1540
1541         return ret;
1542 }
1543
1544 static void lan78xx_get_pause(struct net_device *net,
1545                               struct ethtool_pauseparam *pause)
1546 {
1547         struct lan78xx_net *dev = netdev_priv(net);
1548         struct phy_device *phydev = net->phydev;
1549         struct ethtool_link_ksettings ecmd;
1550
1551         phy_ethtool_ksettings_get(phydev, &ecmd);
1552
1553         pause->autoneg = dev->fc_autoneg;
1554
1555         if (dev->fc_request_control & FLOW_CTRL_TX)
1556                 pause->tx_pause = 1;
1557
1558         if (dev->fc_request_control & FLOW_CTRL_RX)
1559                 pause->rx_pause = 1;
1560 }
1561
1562 static int lan78xx_set_pause(struct net_device *net,
1563                              struct ethtool_pauseparam *pause)
1564 {
1565         struct lan78xx_net *dev = netdev_priv(net);
1566         struct phy_device *phydev = net->phydev;
1567         struct ethtool_link_ksettings ecmd;
1568         int ret;
1569
1570         phy_ethtool_ksettings_get(phydev, &ecmd);
1571
1572         if (pause->autoneg && !ecmd.base.autoneg) {
1573                 ret = -EINVAL;
1574                 goto exit;
1575         }
1576
1577         dev->fc_request_control = 0;
1578         if (pause->rx_pause)
1579                 dev->fc_request_control |= FLOW_CTRL_RX;
1580
1581         if (pause->tx_pause)
1582                 dev->fc_request_control |= FLOW_CTRL_TX;
1583
1584         if (ecmd.base.autoneg) {
1585                 u32 mii_adv;
1586                 u32 advertising;
1587
1588                 ethtool_convert_link_mode_to_legacy_u32(
1589                         &advertising, ecmd.link_modes.advertising);
1590
1591                 advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1592                 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1593                 advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1594
1595                 ethtool_convert_legacy_u32_to_link_mode(
1596                         ecmd.link_modes.advertising, advertising);
1597
1598                 phy_ethtool_ksettings_set(phydev, &ecmd);
1599         }
1600
1601         dev->fc_autoneg = pause->autoneg;
1602
1603         ret = 0;
1604 exit:
1605         return ret;
1606 }
1607
1608 static const struct ethtool_ops lan78xx_ethtool_ops = {
1609         .get_link       = lan78xx_get_link,
1610         .nway_reset     = phy_ethtool_nway_reset,
1611         .get_drvinfo    = lan78xx_get_drvinfo,
1612         .get_msglevel   = lan78xx_get_msglevel,
1613         .set_msglevel   = lan78xx_set_msglevel,
1614         .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1615         .get_eeprom     = lan78xx_ethtool_get_eeprom,
1616         .set_eeprom     = lan78xx_ethtool_set_eeprom,
1617         .get_ethtool_stats = lan78xx_get_stats,
1618         .get_sset_count = lan78xx_get_sset_count,
1619         .get_strings    = lan78xx_get_strings,
1620         .get_wol        = lan78xx_get_wol,
1621         .set_wol        = lan78xx_set_wol,
1622         .get_eee        = lan78xx_get_eee,
1623         .set_eee        = lan78xx_set_eee,
1624         .get_pauseparam = lan78xx_get_pause,
1625         .set_pauseparam = lan78xx_set_pause,
1626         .get_link_ksettings = lan78xx_get_link_ksettings,
1627         .set_link_ksettings = lan78xx_set_link_ksettings,
1628 };
1629
1630 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1631 {
1632         if (!netif_running(netdev))
1633                 return -EINVAL;
1634
1635         return phy_mii_ioctl(netdev->phydev, rq, cmd);
1636 }
1637
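/* Pick an initial MAC address, in order of preference: the address
 * already programmed in RX_ADDRL/RX_ADDRH, the address stored in EEPROM
 * or OTP at EEPROM_MAC_OFFSET, or a randomly generated one.  MAC address
 * filter entry 0 (MAF_LO(0)/MAF_HI(0)) is then marked valid and the
 * address is copied into the net_device.
 */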
1638 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1639 {
1640         u32 addr_lo, addr_hi;
1641         int ret;
1642         u8 addr[6];
1643
1644         ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1645         ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1646
1647         addr[0] = addr_lo & 0xFF;
1648         addr[1] = (addr_lo >> 8) & 0xFF;
1649         addr[2] = (addr_lo >> 16) & 0xFF;
1650         addr[3] = (addr_lo >> 24) & 0xFF;
1651         addr[4] = addr_hi & 0xFF;
1652         addr[5] = (addr_hi >> 8) & 0xFF;
1653
1654         if (!is_valid_ether_addr(addr)) {
1655                 /* read the MAC address from EEPROM or OTP */
1656                 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1657                                          addr) == 0) ||
1658                     (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1659                                       addr) == 0)) {
1660                         if (is_valid_ether_addr(addr)) {
1661                                 /* eeprom values are valid so use them */
1662                                 netif_dbg(dev, ifup, dev->net,
1663                                           "MAC address read from EEPROM");
1664                         } else {
1665                                 /* generate random MAC */
1666                                 random_ether_addr(addr);
1667                                 netif_dbg(dev, ifup, dev->net,
1668                                           "MAC address set to random addr");
1669                         }
1670
1671                         addr_lo = addr[0] | (addr[1] << 8) |
1672                                   (addr[2] << 16) | (addr[3] << 24);
1673                         addr_hi = addr[4] | (addr[5] << 8);
1674
1675                         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1676                         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1677                 } else {
1678                         /* generate random MAC */
1679                         random_ether_addr(addr);
1680                         netif_dbg(dev, ifup, dev->net,
1681                                   "MAC address set to random addr");
1682                 }
1683         }
1684
1685         ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1686         ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1687
1688         ether_addr_copy(dev->net->dev_addr, addr);
1689 }
1690
1691 /* MDIO read and write wrappers for phylib */
1692 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1693 {
1694         struct lan78xx_net *dev = bus->priv;
1695         u32 val, addr;
1696         int ret;
1697
1698         ret = usb_autopm_get_interface(dev->intf);
1699         if (ret < 0)
1700                 return ret;
1701
1702         mutex_lock(&dev->phy_mutex);
1703
1704         /* confirm MII not busy */
1705         ret = lan78xx_phy_wait_not_busy(dev);
1706         if (ret < 0)
1707                 goto done;
1708
1709         /* set the address, index & direction (read from PHY) */
1710         addr = mii_access(phy_id, idx, MII_READ);
1711         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1712
1713         ret = lan78xx_phy_wait_not_busy(dev);
1714         if (ret < 0)
1715                 goto done;
1716
1717         ret = lan78xx_read_reg(dev, MII_DATA, &val);
1718
1719         ret = (int)(val & 0xFFFF);
1720
1721 done:
1722         mutex_unlock(&dev->phy_mutex);
1723         usb_autopm_put_interface(dev->intf);
1724
1725         return ret;
1726 }
1727
1728 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1729                                  u16 regval)
1730 {
1731         struct lan78xx_net *dev = bus->priv;
1732         u32 val, addr;
1733         int ret;
1734
1735         ret = usb_autopm_get_interface(dev->intf);
1736         if (ret < 0)
1737                 return ret;
1738
1739         mutex_lock(&dev->phy_mutex);
1740
1741         /* confirm MII not busy */
1742         ret = lan78xx_phy_wait_not_busy(dev);
1743         if (ret < 0)
1744                 goto done;
1745
1746         val = (u32)regval;
1747         ret = lan78xx_write_reg(dev, MII_DATA, val);
1748
1749         /* set the address, index & direction (write to PHY) */
1750         addr = mii_access(phy_id, idx, MII_WRITE);
1751         ret = lan78xx_write_reg(dev, MII_ACC, addr);
1752
1753         ret = lan78xx_phy_wait_not_busy(dev);
1754         if (ret < 0)
1755                 goto done;
1756
1757 done:
1758         mutex_unlock(&dev->phy_mutex);
1759         usb_autopm_put_interface(dev->intf);
1760         return 0;
1761 }
1762
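/* Register an MDIO bus backed by the USB register interface above.  The
 * bus id encodes the USB bus and device number.  On LAN7800/LAN7850 only
 * the internal PHY at address 1 is scanned; on LAN7801 the external PHY
 * may sit at any address selected by PHYAD[2..0], so the eight lowest
 * addresses are left in the scan mask.
 */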
1763 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1764 {
1765         int ret;
1766
1767         dev->mdiobus = mdiobus_alloc();
1768         if (!dev->mdiobus) {
1769                 netdev_err(dev->net, "can't allocate MDIO bus\n");
1770                 return -ENOMEM;
1771         }
1772
1773         dev->mdiobus->priv = (void *)dev;
1774         dev->mdiobus->read = lan78xx_mdiobus_read;
1775         dev->mdiobus->write = lan78xx_mdiobus_write;
1776         dev->mdiobus->name = "lan78xx-mdiobus";
1777
1778         snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1779                  dev->udev->bus->busnum, dev->udev->devnum);
1780
1781         switch (dev->chipid) {
1782         case ID_REV_CHIP_ID_7800_:
1783         case ID_REV_CHIP_ID_7850_:
1784                 /* set to internal PHY id */
1785                 dev->mdiobus->phy_mask = ~(1 << 1);
1786                 break;
1787         case ID_REV_CHIP_ID_7801_:
1788                 /* scan thru PHYAD[2..0] */
1789                 dev->mdiobus->phy_mask = ~(0xFF);
1790                 break;
1791         }
1792
1793         ret = mdiobus_register(dev->mdiobus);
1794         if (ret) {
1795                 netdev_err(dev->net, "can't register MDIO bus\n");
1796                 goto exit1;
1797         }
1798
1799         netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1800         return 0;
1801 exit1:
1802         mdiobus_free(dev->mdiobus);
1803         return ret;
1804 }
1805
1806 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1807 {
1808         mdiobus_unregister(dev->mdiobus);
1809         mdiobus_free(dev->mdiobus);
1810 }
1811
1812 static void lan78xx_link_status_change(struct net_device *net)
1813 {
1814         struct phy_device *phydev = net->phydev;
1815         int ret, temp;
1816
1817         /* In forced 100 F/H mode, the chip may fail to set the mode
1818          * correctly when the cable is switched between a long (~50+ m)
1819          * and a short one.  As a workaround, set the speed to 10
1820          * before setting it to 100 in forced 100 F/H mode.
1821          */
1822         if (!phydev->autoneg && (phydev->speed == 100)) {
1823                 /* disable phy interrupt */
1824                 temp = phy_read(phydev, LAN88XX_INT_MASK);
1825                 temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
1826                 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1827
1828                 temp = phy_read(phydev, MII_BMCR);
1829                 temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
1830                 phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
1831                 temp |= BMCR_SPEED100;
1832                 phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
1833
1834                 /* clear any pending interrupt generated during the workaround */
1835                 temp = phy_read(phydev, LAN88XX_INT_STS);
1836
1837                 /* re-enable the phy interrupt */
1838                 temp = phy_read(phydev, LAN88XX_INT_MASK);
1839                 temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
1840                 ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
1841         }
1842 }
1843
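/* The chip's interrupt-endpoint enable bits (INT_EP_CTL) are exposed as a
 * small irq_chip + irq_domain so the PHY interrupt (INT_EP_PHY) can be
 * requested like any other Linux IRQ.  The mask/unmask callbacks only
 * update the cached irqenable word; the register itself is written from
 * irq_bus_sync_unlock(), since irq_bus_lock/irq_bus_sync_unlock are the
 * only callbacks that run in a non-atomic context where USB register
 * access is possible.
 */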
1844 static int irq_map(struct irq_domain *d, unsigned int irq,
1845                    irq_hw_number_t hwirq)
1846 {
1847         struct irq_domain_data *data = d->host_data;
1848
1849         irq_set_chip_data(irq, data);
1850         irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
1851         irq_set_noprobe(irq);
1852
1853         return 0;
1854 }
1855
1856 static void irq_unmap(struct irq_domain *d, unsigned int irq)
1857 {
1858         irq_set_chip_and_handler(irq, NULL, NULL);
1859         irq_set_chip_data(irq, NULL);
1860 }
1861
1862 static const struct irq_domain_ops chip_domain_ops = {
1863         .map    = irq_map,
1864         .unmap  = irq_unmap,
1865 };
1866
1867 static void lan78xx_irq_mask(struct irq_data *irqd)
1868 {
1869         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1870
1871         data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
1872 }
1873
1874 static void lan78xx_irq_unmask(struct irq_data *irqd)
1875 {
1876         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1877
1878         data->irqenable |= BIT(irqd_to_hwirq(irqd));
1879 }
1880
1881 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
1882 {
1883         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1884
1885         mutex_lock(&data->irq_lock);
1886 }
1887
1888 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
1889 {
1890         struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
1891         struct lan78xx_net *dev =
1892                         container_of(data, struct lan78xx_net, domain_data);
1893         u32 buf;
1894         int ret;
1895
1896         /* Do the register access here because irq_bus_lock & irq_bus_sync_unlock
1897          * are the only two callbacks executed in a non-atomic context.
1898          */
1899         ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1900         if (buf != data->irqenable)
1901                 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
1902
1903         mutex_unlock(&data->irq_lock);
1904 }
1905
1906 static struct irq_chip lan78xx_irqchip = {
1907         .name                   = "lan78xx-irqs",
1908         .irq_mask               = lan78xx_irq_mask,
1909         .irq_unmask             = lan78xx_irq_unmask,
1910         .irq_bus_lock           = lan78xx_irq_bus_lock,
1911         .irq_bus_sync_unlock    = lan78xx_irq_bus_sync_unlock,
1912 };
1913
1914 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
1915 {
1916         struct device_node *of_node;
1917         struct irq_domain *irqdomain;
1918         unsigned int irqmap = 0;
1919         u32 buf;
1920         int ret = 0;
1921
1922         of_node = dev->udev->dev.parent->of_node;
1923
1924         mutex_init(&dev->domain_data.irq_lock);
1925
1926         lan78xx_read_reg(dev, INT_EP_CTL, &buf);
1927         dev->domain_data.irqenable = buf;
1928
1929         dev->domain_data.irqchip = &lan78xx_irqchip;
1930         dev->domain_data.irq_handler = handle_simple_irq;
1931
1932         irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
1933                                           &chip_domain_ops, &dev->domain_data);
1934         if (irqdomain) {
1935                 /* create mapping for PHY interrupt */
1936                 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
1937                 if (!irqmap) {
1938                         irq_domain_remove(irqdomain);
1939
1940                         irqdomain = NULL;
1941                         ret = -EINVAL;
1942                 }
1943         } else {
1944                 ret = -EINVAL;
1945         }
1946
1947         dev->domain_data.irqdomain = irqdomain;
1948         dev->domain_data.phyirq = irqmap;
1949
1950         return ret;
1951 }
1952
1953 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
1954 {
1955         if (dev->domain_data.phyirq > 0) {
1956                 irq_dispose_mapping(dev->domain_data.phyirq);
1957
1958                 if (dev->domain_data.irqdomain)
1959                         irq_domain_remove(dev->domain_data.irqdomain);
1960         }
1961         dev->domain_data.phyirq = 0;
1962         dev->domain_data.irqdomain = NULL;
1963 }
1964
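/* PHY fixups for the external PHYs that can sit behind a LAN7801,
 * registered from lan78xx_phy_init() below.  Each fixup tunes the RGMII
 * clock/pad delays for its PHY and records the resulting phy-interface
 * mode in dev->interface.
 */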
1965 static int lan8835_fixup(struct phy_device *phydev)
1966 {
1967         int buf;
1968         int ret;
1969         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1970
1971         /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
1972         buf = phy_read_mmd(phydev, MDIO_MMD_PCS, 0x8010);
1973         buf &= ~0x1800;
1974         buf |= 0x0800;
1975         phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8010, buf);
1976
1977         /* RGMII MAC TXC Delay Enable */
1978         ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
1979                                 MAC_RGMII_ID_TXC_DELAY_EN_);
1980
1981         /* RGMII TX DLL Tune Adjust */
1982         ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
1983
1984         dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;
1985
1986         return 1;
1987 }
1988
1989 static int ksz9031rnx_fixup(struct phy_device *phydev)
1990 {
1991         struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);
1992
1993         /* Micrel KSZ9031RNX PHY configuration */
1994         /* RGMII Control Signal Pad Skew */
1995         phy_write_mmd(phydev, MDIO_MMD_WIS, 4, 0x0077);
1996         /* RGMII RX Data Pad Skew */
1997         phy_write_mmd(phydev, MDIO_MMD_WIS, 5, 0x7777);
1998         /* RGMII RX Clock Pad Skew */
1999         phy_write_mmd(phydev, MDIO_MMD_WIS, 8, 0x1FF);
2000
2001         dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;
2002
2003         return 1;
2004 }
2005
2006 static int lan78xx_phy_init(struct lan78xx_net *dev)
2007 {
2008         int ret;
2009         u32 mii_adv;
2010         struct phy_device *phydev;
2011
2012         phydev = phy_find_first(dev->mdiobus);
2013         if (!phydev) {
2014                 netdev_err(dev->net, "no PHY found\n");
2015                 return -EIO;
2016         }
2017
2018         if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
2019             (dev->chipid == ID_REV_CHIP_ID_7850_)) {
2020                 phydev->is_internal = true;
2021                 dev->interface = PHY_INTERFACE_MODE_GMII;
2022
2023         } else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
2024                 if (!phydev->drv) {
2025                         netdev_err(dev->net, "no PHY driver found\n");
2026                         return -EIO;
2027                 }
2028
2029                 dev->interface = PHY_INTERFACE_MODE_RGMII;
2030
2031                 /* external PHY fixup for KSZ9031RNX */
2032                 ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
2033                                                  ksz9031rnx_fixup);
2034                 if (ret < 0) {
2035                         netdev_err(dev->net, "fail to register fixup\n");
2036                         return ret;
2037                 }
2038                 /* external PHY fixup for LAN8835 */
2039                 ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
2040                                                  lan8835_fixup);
2041                 if (ret < 0) {
2042                         netdev_err(dev->net, "fail to register fixup\n");
2043                         return ret;
2044                 }
2045                 /* add more external PHY fixups here if needed */
2046
2047                 phydev->is_internal = false;
2048         } else {
2049                 netdev_err(dev->net, "unknown ID found\n");
2050                 ret = -EIO;
2051                 goto error;
2052         }
2053
2054         /* if phyirq is not set, use polling mode in phylib */
2055         if (dev->domain_data.phyirq > 0)
2056                 phydev->irq = dev->domain_data.phyirq;
2057         else
2058                 phydev->irq = 0;
2059         netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2060
2061         /* set to AUTOMDIX */
2062         phydev->mdix = ETH_TP_MDI_AUTO;
2063
2064         ret = phy_connect_direct(dev->net, phydev,
2065                                  lan78xx_link_status_change,
2066                                  dev->interface);
2067         if (ret) {
2068                 netdev_err(dev->net, "can't attach PHY to %s\n",
2069                            dev->mdiobus->id);
2070                 return -EIO;
2071         }
2072
2073         /* MAC doesn't support 1000T Half */
2074         phydev->supported &= ~SUPPORTED_1000baseT_Half;
2075
2076         /* support both flow controls */
2077         dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
2078         phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
2079         mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2080         phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
2081
2082         genphy_config_aneg(phydev);
2083
2084         dev->fc_autoneg = phydev->autoneg;
2085
2086         return 0;
2087
2088 error:
2089         phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
2090         phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);
2091
2092         return ret;
2093 }
2094
2095 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2096 {
2097         int ret = 0;
2098         u32 buf;
2099         bool rxenabled;
2100
2101         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2102
2103         rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2104
2105         if (rxenabled) {
2106                 buf &= ~MAC_RX_RXEN_;
2107                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2108         }
2109
2110         /* add 4 to size for FCS */
2111         buf &= ~MAC_RX_MAX_SIZE_MASK_;
2112         buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2113
2114         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2115
2116         if (rxenabled) {
2117                 buf |= MAC_RX_RXEN_;
2118                 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2119         }
2120
2121         return 0;
2122 }
2123
2124 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2125 {
2126         struct sk_buff *skb;
2127         unsigned long flags;
2128         int count = 0;
2129
2130         spin_lock_irqsave(&q->lock, flags);
2131         while (!skb_queue_empty(q)) {
2132                 struct skb_data *entry;
2133                 struct urb *urb;
2134                 int ret;
2135
2136                 skb_queue_walk(q, skb) {
2137                         entry = (struct skb_data *)skb->cb;
2138                         if (entry->state != unlink_start)
2139                                 goto found;
2140                 }
2141                 break;
2142 found:
2143                 entry->state = unlink_start;
2144                 urb = entry->urb;
2145
2146                 /* Take a reference on the URB to prevent it from being
2147                  * freed during usb_unlink_urb, which could otherwise
2148                  * trigger a use-after-free inside usb_unlink_urb, since
2149                  * usb_unlink_urb always races with the .complete
2150                  * handler (including defer_bh).
2151                  */
2152                 usb_get_urb(urb);
2153                 spin_unlock_irqrestore(&q->lock, flags);
2154                 /* during some PM-driven resume scenarios,
2155                  * these (async) unlinks complete immediately
2156                  */
2157                 ret = usb_unlink_urb(urb);
2158                 if (ret != -EINPROGRESS && ret != 0)
2159                         netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2160                 else
2161                         count++;
2162                 usb_put_urb(urb);
2163                 spin_lock_irqsave(&q->lock, flags);
2164         }
2165         spin_unlock_irqrestore(&q->lock, flags);
2166         return count;
2167 }
2168
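/* MTU changes: reject an MTU whose link-layer size is an exact multiple
 * of the bulk-in packet size (the transfer would then need a terminating
 * zero-length packet), program the new maximum RX frame length, and grow
 * rx_urb_size along with hard_mtu, unlinking in-flight RX URBs so they
 * are resubmitted at the new size.
 */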
2169 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2170 {
2171         struct lan78xx_net *dev = netdev_priv(netdev);
2172         int ll_mtu = new_mtu + netdev->hard_header_len;
2173         int old_hard_mtu = dev->hard_mtu;
2174         int old_rx_urb_size = dev->rx_urb_size;
2175         int ret;
2176
2177         /* no second zero-length packet read wanted after mtu-sized packets */
2178         if ((ll_mtu % dev->maxpacket) == 0)
2179                 return -EDOM;
2180
2181         ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
2182
2183         netdev->mtu = new_mtu;
2184
2185         dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
2186         if (dev->rx_urb_size == old_hard_mtu) {
2187                 dev->rx_urb_size = dev->hard_mtu;
2188                 if (dev->rx_urb_size > old_rx_urb_size) {
2189                         if (netif_running(dev->net)) {
2190                                 unlink_urbs(dev, &dev->rxq);
2191                                 tasklet_schedule(&dev->bh);
2192                         }
2193                 }
2194         }
2195
2196         return 0;
2197 }
2198
2199 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2200 {
2201         struct lan78xx_net *dev = netdev_priv(netdev);
2202         struct sockaddr *addr = p;
2203         u32 addr_lo, addr_hi;
2204         int ret;
2205
2206         if (netif_running(netdev))
2207                 return -EBUSY;
2208
2209         if (!is_valid_ether_addr(addr->sa_data))
2210                 return -EADDRNOTAVAIL;
2211
2212         ether_addr_copy(netdev->dev_addr, addr->sa_data);
2213
2214         addr_lo = netdev->dev_addr[0] |
2215                   netdev->dev_addr[1] << 8 |
2216                   netdev->dev_addr[2] << 16 |
2217                   netdev->dev_addr[3] << 24;
2218         addr_hi = netdev->dev_addr[4] |
2219                   netdev->dev_addr[5] << 8;
2220
2221         ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
2222         ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
2223
2224         return 0;
2225 }
2226
2227 /* Enable or disable the Rx checksum offload engine and VLAN filtering */
2228 static int lan78xx_set_features(struct net_device *netdev,
2229                                 netdev_features_t features)
2230 {
2231         struct lan78xx_net *dev = netdev_priv(netdev);
2232         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2233         unsigned long flags;
2234         int ret;
2235
2236         spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2237
2238         if (features & NETIF_F_RXCSUM) {
2239                 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2240                 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2241         } else {
2242                 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2243                 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2244         }
2245
2246         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2247                 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2248         else
2249                 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2250
2251         spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2252
2253         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2254
2255         return 0;
2256 }
2257
2258 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2259 {
2260         struct lan78xx_priv *pdata =
2261                         container_of(param, struct lan78xx_priv, set_vlan);
2262         struct lan78xx_net *dev = pdata->dev;
2263
2264         lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2265                                DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2266 }
2267
2268 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2269                                    __be16 proto, u16 vid)
2270 {
2271         struct lan78xx_net *dev = netdev_priv(netdev);
2272         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2273         u16 vid_bit_index;
2274         u16 vid_dword_index;
2275
2276         vid_dword_index = (vid >> 5) & 0x7F;
2277         vid_bit_index = vid & 0x1F;
2278
2279         pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2280
2281         /* defer register writes to a sleepable context */
2282         schedule_work(&pdata->set_vlan);
2283
2284         return 0;
2285 }
2286
2287 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2288                                     __be16 proto, u16 vid)
2289 {
2290         struct lan78xx_net *dev = netdev_priv(netdev);
2291         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2292         u16 vid_bit_index;
2293         u16 vid_dword_index;
2294
2295         vid_dword_index = (vid >> 5) & 0x7F;
2296         vid_bit_index = vid & 0x1F;
2297
2298         pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2299
2300         /* defer register writes to a sleepable context */
2301         schedule_work(&pdata->set_vlan);
2302
2303         return 0;
2304 }
2305
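/* Initialise the LTM registers (presumably USB Latency Tolerance
 * Messaging).  When LTM is enabled in USB_CFG1, a 24-byte table of
 * register values is loaded from EEPROM or OTP, located via a two-byte
 * descriptor at offset 0x3F; otherwise the six LTM_BELT_IDLE, LTM_BELT_ACT
 * and LTM_INACTIVE registers are programmed with zeros.
 */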
2306 static void lan78xx_init_ltm(struct lan78xx_net *dev)
2307 {
2308         int ret;
2309         u32 buf;
2310         u32 regs[6] = { 0 };
2311
2312         ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2313         if (buf & USB_CFG1_LTM_ENABLE_) {
2314                 u8 temp[2];
2315                 /* Get values from EEPROM first */
2316                 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
2317                         if (temp[0] == 24) {
2318                                 ret = lan78xx_read_raw_eeprom(dev,
2319                                                               temp[1] * 2,
2320                                                               24,
2321                                                               (u8 *)regs);
2322                                 if (ret < 0)
2323                                         return;
2324                         }
2325                 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2326                         if (temp[0] == 24) {
2327                                 ret = lan78xx_read_raw_otp(dev,
2328                                                            temp[1] * 2,
2329                                                            24,
2330                                                            (u8 *)regs);
2331                                 if (ret < 0)
2332                                         return;
2333                         }
2334                 }
2335         }
2336
2337         lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2338         lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2339         lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2340         lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2341         lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2342         lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2343 }
2344
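/* Full hardware (re)initialisation, called from bind and open: perform a
 * lite reset (HW_CFG_LRST_), restore the MAC address, size the burst cap
 * and RX/TX FIFOs for the connected USB speed, program the default
 * receive-filter (RFE) and checksum-offload settings, reset the PHY and
 * wait for PMT_CTL_READY_, then enable the MAC and FCT TX/RX paths.
 */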
2345 static int lan78xx_reset(struct lan78xx_net *dev)
2346 {
2347         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2348         u32 buf;
2349         int ret = 0;
2350         unsigned long timeout;
2351         u8 sig;
2352
2353         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2354         buf |= HW_CFG_LRST_;
2355         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2356
2357         timeout = jiffies + HZ;
2358         do {
2359                 mdelay(1);
2360                 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2361                 if (time_after(jiffies, timeout)) {
2362                         netdev_warn(dev->net,
2363                                     "timeout on completion of LiteReset");
2364                         return -EIO;
2365                 }
2366         } while (buf & HW_CFG_LRST_);
2367
2368         lan78xx_init_mac_address(dev);
2369
2370         /* save DEVID for later usage */
2371         ret = lan78xx_read_reg(dev, ID_REV, &buf);
2372         dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2373         dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2374
2375         /* Respond to the IN token with a NAK */
2376         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2377         buf |= USB_CFG_BIR_;
2378         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2379
2380         /* Init LTM */
2381         lan78xx_init_ltm(dev);
2382
2383         if (dev->udev->speed == USB_SPEED_SUPER) {
2384                 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2385                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2386                 dev->rx_qlen = 4;
2387                 dev->tx_qlen = 4;
2388         } else if (dev->udev->speed == USB_SPEED_HIGH) {
2389                 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2390                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2391                 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2392                 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2393         } else {
2394                 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2395                 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2396                 dev->rx_qlen = 4;
2397                 dev->tx_qlen = 4;
2398         }
2399
2400         ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2401         ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
2402
2403         ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2404         buf |= HW_CFG_MEF_;
2405         ret = lan78xx_write_reg(dev, HW_CFG, buf);
2406
2407         ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2408         buf |= USB_CFG_BCE_;
2409         ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2410
2411         /* set FIFO sizes */
2412         buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2413         ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2414
2415         buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2416         ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
2417
2418         ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2419         ret = lan78xx_write_reg(dev, FLOW, 0);
2420         ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2421
2422         /* Don't need rfe_ctl_lock during initialisation */
2423         ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2424         pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2425         ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2426
2427         /* Enable or disable checksum offload engines */
2428         lan78xx_set_features(dev->net, dev->net->features);
2429
2430         lan78xx_set_multicast(dev->net);
2431
2432         /* reset PHY */
2433         ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2434         buf |= PMT_CTL_PHY_RST_;
2435         ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2436
2437         timeout = jiffies + HZ;
2438         do {
2439                 mdelay(1);
2440                 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2441                 if (time_after(jiffies, timeout)) {
2442                         netdev_warn(dev->net, "timeout waiting for PHY Reset");
2443                         return -EIO;
2444                 }
2445         } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2446
2447         ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2448         /* LAN7801 only has RGMII mode */
2449         if (dev->chipid == ID_REV_CHIP_ID_7801_)
2450                 buf &= ~MAC_CR_GMII_EN_;
2451
2452         if (dev->chipid == ID_REV_CHIP_ID_7800_) {
2453                 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
2454                 if (!ret && sig != EEPROM_INDICATOR) {
2455                         /* Implies there is no external EEPROM. Set MAC speed */
2456                         netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
2457                         buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2458                 }
2459         }
2460         ret = lan78xx_write_reg(dev, MAC_CR, buf);
2461
2462         ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2463         buf |= MAC_TX_TXEN_;
2464         ret = lan78xx_write_reg(dev, MAC_TX, buf);
2465
2466         ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2467         buf |= FCT_TX_CTL_EN_;
2468         ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2469
2470         ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
2471
2472         ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2473         buf |= MAC_RX_RXEN_;
2474         ret = lan78xx_write_reg(dev, MAC_RX, buf);
2475
2476         ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2477         buf |= FCT_RX_CTL_EN_;
2478         ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2479
2480         return 0;
2481 }
2482
2483 static void lan78xx_init_stats(struct lan78xx_net *dev)
2484 {
2485         u32 *p;
2486         int i;
2487
2488         /* initialize the rollover limits for stats update;
2489          * some counters are 20 bits wide and some are 32 bits
2490          */
2491         p = (u32 *)&dev->stats.rollover_max;
2492         for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2493                 p[i] = 0xFFFFF;
2494
2495         dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2496         dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2497         dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2498         dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2499         dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2500         dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2501         dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2502         dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2503         dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2504         dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2505
2506         set_bit(EVENT_STAT_UPDATE, &dev->flags);
2507 }
2508
2509 static int lan78xx_open(struct net_device *net)
2510 {
2511         struct lan78xx_net *dev = netdev_priv(net);
2512         int ret;
2513
2514         ret = usb_autopm_get_interface(dev->intf);
2515         if (ret < 0)
2516                 goto out;
2517
2518         ret = lan78xx_reset(dev);
2519         if (ret < 0)
2520                 goto done;
2521
2522         phy_start(net->phydev);
2523
2524         netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
2525
2526         /* for Link Check */
2527         if (dev->urb_intr) {
2528                 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2529                 if (ret < 0) {
2530                         netif_err(dev, ifup, dev->net,
2531                                   "intr submit %d\n", ret);
2532                         goto done;
2533                 }
2534         }
2535
2536         lan78xx_init_stats(dev);
2537
2538         set_bit(EVENT_DEV_OPEN, &dev->flags);
2539
2540         netif_start_queue(net);
2541
2542         dev->link_on = false;
2543
2544         lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2545 done:
2546         usb_autopm_put_interface(dev->intf);
2547
2548 out:
2549         return ret;
2550 }
2551
2552 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2553 {
2554         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2555         DECLARE_WAITQUEUE(wait, current);
2556         int temp;
2557
2558         /* ensure there are no more active urbs */
2559         add_wait_queue(&unlink_wakeup, &wait);
2560         set_current_state(TASK_UNINTERRUPTIBLE);
2561         dev->wait = &unlink_wakeup;
2562         temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2563
2564         /* maybe wait for deletions to finish. */
2565         while (!skb_queue_empty(&dev->rxq) &&
2566                !skb_queue_empty(&dev->txq) &&
2567                !skb_queue_empty(&dev->done)) {
2568                 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2569                 set_current_state(TASK_UNINTERRUPTIBLE);
2570                 netif_dbg(dev, ifdown, dev->net,
2571                           "waited for %d urb completions\n", temp);
2572         }
2573         set_current_state(TASK_RUNNING);
2574         dev->wait = NULL;
2575         remove_wait_queue(&unlink_wakeup, &wait);
2576 }
2577
2578 static int lan78xx_stop(struct net_device *net)
2579 {
2580         struct lan78xx_net              *dev = netdev_priv(net);
2581
2582         if (timer_pending(&dev->stat_monitor))
2583                 del_timer_sync(&dev->stat_monitor);
2584
2585         if (net->phydev)
2586                 phy_stop(net->phydev);
2587
2588         clear_bit(EVENT_DEV_OPEN, &dev->flags);
2589         netif_stop_queue(net);
2590
2591         netif_info(dev, ifdown, dev->net,
2592                    "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2593                    net->stats.rx_packets, net->stats.tx_packets,
2594                    net->stats.rx_errors, net->stats.tx_errors);
2595
2596         lan78xx_terminate_urbs(dev);
2597
2598         usb_kill_urb(dev->urb_intr);
2599
2600         skb_queue_purge(&dev->rxq_pause);
2601
2602         /* deferred work (task, timer, softirq) must also stop.
2603          * can't flush_scheduled_work() until we drop rtnl (later),
2604          * else workers could deadlock; so make workers a NOP.
2605          */
2606         dev->flags = 0;
2607         cancel_delayed_work_sync(&dev->wq);
2608         tasklet_kill(&dev->bh);
2609
2610         usb_autopm_put_interface(dev->intf);
2611
2612         return 0;
2613 }
2614
2615 static int lan78xx_linearize(struct sk_buff *skb)
2616 {
2617         return skb_linearize(skb);
2618 }
2619
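/* Prepend the two little-endian TX command words the hardware expects in
 * front of every frame (the TX_OVERHEAD headroom reserved by
 * skb_cow_head), so each bulk-out buffer starts with:
 *
 *   +------------------+------------------+-----------------------+
 *   |     TX_CMD_A     |     TX_CMD_B     |    Ethernet frame     |
 *   | len, FCS, csum,  | MSS and VLAN tag |                       |
 *   | LSO flags  (4B)  |       (4B)       |                       |
 *   +------------------+------------------+-----------------------+
 */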
2620 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2621                                        struct sk_buff *skb, gfp_t flags)
2622 {
2623         u32 tx_cmd_a, tx_cmd_b;
2624
2625         if (skb_cow_head(skb, TX_OVERHEAD)) {
2626                 dev_kfree_skb_any(skb);
2627                 return NULL;
2628         }
2629
2630         if (lan78xx_linearize(skb) < 0)
2631                 return NULL;
2632
2633         tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2634
2635         if (skb->ip_summed == CHECKSUM_PARTIAL)
2636                 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2637
2638         tx_cmd_b = 0;
2639         if (skb_is_gso(skb)) {
2640                 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2641
2642                 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2643
2644                 tx_cmd_a |= TX_CMD_A_LSO_;
2645         }
2646
2647         if (skb_vlan_tag_present(skb)) {
2648                 tx_cmd_a |= TX_CMD_A_IVTG_;
2649                 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2650         }
2651
2652         skb_push(skb, 4);
2653         cpu_to_le32s(&tx_cmd_b);
2654         memcpy(skb->data, &tx_cmd_b, 4);
2655
2656         skb_push(skb, 4);
2657         cpu_to_le32s(&tx_cmd_a);
2658         memcpy(skb->data, &tx_cmd_a, 4);
2659
2660         return skb;
2661 }
2662
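/* Move a completed skb from its active queue (rxq or txq) to dev->done
 * under both queue locks, record its new state, and kick the bottom-half
 * tasklet when the done queue goes from empty to non-empty.  Returns the
 * state the skb was in before completion.
 */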
2663 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
2664                                struct sk_buff_head *list, enum skb_state state)
2665 {
2666         unsigned long flags;
2667         enum skb_state old_state;
2668         struct skb_data *entry = (struct skb_data *)skb->cb;
2669
2670         spin_lock_irqsave(&list->lock, flags);
2671         old_state = entry->state;
2672         entry->state = state;
2673
2674         __skb_unlink(skb, list);
2675         spin_unlock(&list->lock);
2676         spin_lock(&dev->done.lock);
2677
2678         __skb_queue_tail(&dev->done, skb);
2679         if (skb_queue_len(&dev->done) == 1)
2680                 tasklet_schedule(&dev->bh);
2681         spin_unlock_irqrestore(&dev->done.lock, flags);
2682
2683         return old_state;
2684 }
2685
2686 static void tx_complete(struct urb *urb)
2687 {
2688         struct sk_buff *skb = (struct sk_buff *)urb->context;
2689         struct skb_data *entry = (struct skb_data *)skb->cb;
2690         struct lan78xx_net *dev = entry->dev;
2691
2692         if (urb->status == 0) {
2693                 dev->net->stats.tx_packets += entry->num_of_packet;
2694                 dev->net->stats.tx_bytes += entry->length;
2695         } else {
2696                 dev->net->stats.tx_errors++;
2697
2698                 switch (urb->status) {
2699                 case -EPIPE:
2700                         lan78xx_defer_kevent(dev, EVENT_TX_HALT);
2701                         break;
2702
2703                 /* software-driven interface shutdown */
2704                 case -ECONNRESET:
2705                 case -ESHUTDOWN:
2706                         break;
2707
2708                 case -EPROTO:
2709                 case -ETIME:
2710                 case -EILSEQ:
2711                         netif_stop_queue(dev->net);
2712                         break;
2713                 default:
2714                         netif_dbg(dev, tx_err, dev->net,
2715                                   "tx err %d\n", entry->urb->status);
2716                         break;
2717                 }
2718         }
2719
2720         usb_autopm_put_interface_async(dev->intf);
2721
2722         defer_bh(dev, skb, &dev->txq, tx_done);
2723 }
2724
2725 static void lan78xx_queue_skb(struct sk_buff_head *list,
2726                               struct sk_buff *newsk, enum skb_state state)
2727 {
2728         struct skb_data *entry = (struct skb_data *)newsk->cb;
2729
2730         __skb_queue_tail(list, newsk);
2731         entry->state = state;
2732 }
2733
2734 static netdev_tx_t
2735 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
2736 {
2737         struct lan78xx_net *dev = netdev_priv(net);
2738         struct sk_buff *skb2 = NULL;
2739
2740         if (skb) {
2741                 skb_tx_timestamp(skb);
2742                 skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
2743         }
2744
2745         if (skb2) {
2746                 skb_queue_tail(&dev->txq_pend, skb2);
2747
2748                 /* throttle the TX path at speeds slower than SuperSpeed USB */
2749                 if ((dev->udev->speed < USB_SPEED_SUPER) &&
2750                     (skb_queue_len(&dev->txq_pend) > 10))
2751                         netif_stop_queue(net);
2752         } else {
2753                 netif_dbg(dev, tx_err, dev->net,
2754                           "lan78xx_tx_prep return NULL\n");
2755                 dev->net->stats.tx_errors++;
2756                 dev->net->stats.tx_dropped++;
2757         }
2758
2759         tasklet_schedule(&dev->bh);
2760
2761         return NETDEV_TX_OK;
2762 }
2763
2764 static int
2765 lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
2766 {
2767         int tmp;
2768         struct usb_host_interface *alt = NULL;
2769         struct usb_host_endpoint *in = NULL, *out = NULL;
2770         struct usb_host_endpoint *status = NULL;
2771
2772         for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
2773                 unsigned ep;
2774
2775                 in = NULL;
2776                 out = NULL;
2777                 status = NULL;
2778                 alt = intf->altsetting + tmp;
2779
2780                 for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
2781                         struct usb_host_endpoint *e;
2782                         int intr = 0;
2783
2784                         e = alt->endpoint + ep;
2785                         switch (e->desc.bmAttributes) {
2786                         case USB_ENDPOINT_XFER_INT:
2787                                 if (!usb_endpoint_dir_in(&e->desc))
2788                                         continue;
2789                                 intr = 1;
2790                                 /* FALLTHROUGH */
2791                         case USB_ENDPOINT_XFER_BULK:
2792                                 break;
2793                         default:
2794                                 continue;
2795                         }
2796                         if (usb_endpoint_dir_in(&e->desc)) {
2797                                 if (!intr && !in)
2798                                         in = e;
2799                                 else if (intr && !status)
2800                                         status = e;
2801                         } else {
2802                                 if (!out)
2803                                         out = e;
2804                         }
2805                 }
2806                 if (in && out)
2807                         break;
2808         }
2809         if (!alt || !in || !out)
2810                 return -EINVAL;
2811
2812         dev->pipe_in = usb_rcvbulkpipe(dev->udev,
2813                                        in->desc.bEndpointAddress &
2814                                        USB_ENDPOINT_NUMBER_MASK);
2815         dev->pipe_out = usb_sndbulkpipe(dev->udev,
2816                                         out->desc.bEndpointAddress &
2817                                         USB_ENDPOINT_NUMBER_MASK);
2818         dev->ep_intr = status;
2819
2820         return 0;
2821 }
2822
2823 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2824 {
2825         struct lan78xx_priv *pdata = NULL;
2826         int ret;
2827         int i;
2828
2829         ret = lan78xx_get_endpoints(dev, intf);
2830
2831         dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2832
2833         pdata = (struct lan78xx_priv *)(dev->data[0]);
2834         if (!pdata) {
2835                 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2836                 return -ENOMEM;
2837         }
2838
2839         pdata->dev = dev;
2840
2841         spin_lock_init(&pdata->rfe_ctl_lock);
2842         mutex_init(&pdata->dataport_mutex);
2843
2844         INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2845
2846         for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2847                 pdata->vlan_table[i] = 0;
2848
2849         INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2850
2851         dev->net->features = 0;
2852
2853         if (DEFAULT_TX_CSUM_ENABLE)
2854                 dev->net->features |= NETIF_F_HW_CSUM;
2855
2856         if (DEFAULT_RX_CSUM_ENABLE)
2857                 dev->net->features |= NETIF_F_RXCSUM;
2858
2859         if (DEFAULT_TSO_CSUM_ENABLE)
2860                 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2861
2862         dev->net->hw_features = dev->net->features;
2863
2864         ret = lan78xx_setup_irq_domain(dev);
2865         if (ret < 0) {
2866                 netdev_warn(dev->net,
2867                             "lan78xx_setup_irq_domain() failed : %d", ret);
2868                 goto out1;
2869         }
2870
2871         dev->net->hard_header_len += TX_OVERHEAD;
2872         dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
2873
2874         /* Init all registers */
2875         ret = lan78xx_reset(dev);
2876         if (ret) {
2877                 netdev_warn(dev->net, "Registers INIT FAILED....");
2878                 goto out2;
2879         }
2880
2881         ret = lan78xx_mdio_init(dev);
2882         if (ret) {
2883                 netdev_warn(dev->net, "MDIO INIT FAILED.....");
2884                 goto out2;
2885         }
2886
2887         dev->net->flags |= IFF_MULTICAST;
2888
2889         pdata->wol = WAKE_MAGIC;
2890
2891         return ret;
2892
2893 out2:
2894         lan78xx_remove_irq_domain(dev);
2895
2896 out1:
2897         netdev_warn(dev->net, "Bind routine FAILED");
2898         cancel_work_sync(&pdata->set_multicast);
2899         cancel_work_sync(&pdata->set_vlan);
2900         kfree(pdata);
2901         return ret;
2902 }
2903
2904 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2905 {
2906         struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2907
2908         lan78xx_remove_irq_domain(dev);
2909
2910         lan78xx_remove_mdio(dev);
2911
2912         if (pdata) {
2913                 cancel_work_sync(&pdata->set_multicast);
2914                 cancel_work_sync(&pdata->set_vlan);
2915                 netif_dbg(dev, ifdown, dev->net, "free pdata");
2916                 kfree(pdata);
2917                 pdata = NULL;
2918                 dev->data[0] = 0;
2919         }
2920 }
2921
2922 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2923                                     struct sk_buff *skb,
2924                                     u32 rx_cmd_a, u32 rx_cmd_b)
2925 {
2926         if (!(dev->net->features & NETIF_F_RXCSUM) ||
2927             unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2928                 skb->ip_summed = CHECKSUM_NONE;
2929         } else {
2930                 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2931                 skb->ip_summed = CHECKSUM_COMPLETE;
2932         }
2933 }
2934
2935 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
2936 {
2937         int             status;
2938
2939         if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
2940                 skb_queue_tail(&dev->rxq_pause, skb);
2941                 return;
2942         }
2943
2944         dev->net->stats.rx_packets++;
2945         dev->net->stats.rx_bytes += skb->len;
2946
2947         skb->protocol = eth_type_trans(skb, dev->net);
2948
2949         netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
2950                   skb->len + sizeof(struct ethhdr), skb->protocol);
2951         memset(skb->cb, 0, sizeof(struct skb_data));
2952
2953         if (skb_defer_rx_timestamp(skb))
2954                 return;
2955
2956         status = netif_rx(skb);
2957         if (status != NET_RX_SUCCESS)
2958                 netif_dbg(dev, rx_err, dev->net,
2959                           "netif_rx status %d\n", status);
2960 }
2961
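/* Unpack one bulk-in buffer.  A single URB can carry several Ethernet
 * frames, each prefixed by three little-endian RX command words:
 *
 *   +-----------+-----------+-----------+---------------+----------+
 *   | RX_CMD_A  | RX_CMD_B  | RX_CMD_C  |  frame + FCS  | pad to   |
 *   | (4 bytes) | (4 bytes) | (2 bytes) | (len from A)  | 4 bytes  |
 *   +-----------+-----------+-----------+---------------+----------+
 *
 * RX_CMD_A carries the frame length and the error flag (RX_CMD_A_RED_),
 * RX_CMD_B feeds the checksum offload.  The FCS is trimmed before a frame
 * is handed to the stack; every frame except the last in the buffer is
 * cloned so the original skb can keep being parsed.
 */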
2962 static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
2963 {
2964         if (skb->len < dev->net->hard_header_len)
2965                 return 0;
2966
2967         while (skb->len > 0) {
2968                 u32 rx_cmd_a, rx_cmd_b, align_count, size;
2969                 u16 rx_cmd_c;
2970                 struct sk_buff *skb2;
2971                 unsigned char *packet;
2972
2973                 memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
2974                 le32_to_cpus(&rx_cmd_a);
2975                 skb_pull(skb, sizeof(rx_cmd_a));
2976
2977                 memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
2978                 le32_to_cpus(&rx_cmd_b);
2979                 skb_pull(skb, sizeof(rx_cmd_b));
2980
2981                 memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
2982                 le16_to_cpus(&rx_cmd_c);
2983                 skb_pull(skb, sizeof(rx_cmd_c));
2984
2985                 packet = skb->data;
2986
2987                 /* get the packet length */
2988                 size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
2989                 align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;
2990
2991                 if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
2992                         netif_dbg(dev, rx_err, dev->net,
2993                                   "Error rx_cmd_a=0x%08x", rx_cmd_a);
2994                 } else {
2995                         /* last frame in this batch */
2996                         if (skb->len == size) {
2997                                 lan78xx_rx_csum_offload(dev, skb,
2998                                                         rx_cmd_a, rx_cmd_b);
2999
3000                                 skb_trim(skb, skb->len - 4); /* remove fcs */
3001                                 skb->truesize = size + sizeof(struct sk_buff);
3002
3003                                 return 1;
3004                         }
3005
3006                         skb2 = skb_clone(skb, GFP_ATOMIC);
3007                         if (unlikely(!skb2)) {
3008                                 netdev_warn(dev->net, "Error allocating skb\n");
3009                                 return 0;
3010                         }
3011
3012                         skb2->len = size;
3013                         skb2->data = packet;
3014                         skb_set_tail_pointer(skb2, size);
3015
3016                         lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3017
3018                         skb_trim(skb2, skb2->len - 4); /* remove fcs */
3019                         skb2->truesize = size + sizeof(struct sk_buff);
3020
3021                         lan78xx_skb_return(dev, skb2);
3022                 }
3023
3024                 skb_pull(skb, size);
3025
3026                 /* padding bytes before the next frame starts */
3027                 if (skb->len)
3028                         skb_pull(skb, align_count);
3029         }
3030
3031         return 1;
3032 }
3033
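     /* Per-URB RX bottom-half step: parse the buffer with lan78xx_rx(); frames
      * go to the stack, while empty or malformed buffers are queued on
      * dev->done and counted as rx errors.
      */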
3034 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
3035 {
3036         if (!lan78xx_rx(dev, skb)) {
3037                 dev->net->stats.rx_errors++;
3038                 goto done;
3039         }
3040
3041         if (skb->len) {
3042                 lan78xx_skb_return(dev, skb);
3043                 return;
3044         }
3045
3046         netif_dbg(dev, rx_err, dev->net, "drop\n");
3047         dev->net->stats.rx_errors++;
3048 done:
3049         skb_queue_tail(&dev->done, skb);
3050 }
3051
3052 static void rx_complete(struct urb *urb);
3053
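     /* Allocate a receive skb, bind it to @urb and submit the bulk-in
      * transfer.  -EPIPE defers EVENT_RX_HALT to the kevent worker, -ENODEV
      * detaches the netdev, and a stopped or asleep device yields -ENOLINK;
      * on any failure the skb and URB are freed before returning.
      */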
3054 static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
3055 {
3056         struct sk_buff *skb;
3057         struct skb_data *entry;
3058         unsigned long lockflags;
3059         size_t size = dev->rx_urb_size;
3060         int ret = 0;
3061
3062         skb = netdev_alloc_skb_ip_align(dev->net, size);
3063         if (!skb) {
3064                 usb_free_urb(urb);
3065                 return -ENOMEM;
3066         }
3067
3068         entry = (struct skb_data *)skb->cb;
3069         entry->urb = urb;
3070         entry->dev = dev;
3071         entry->length = 0;
3072
3073         usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
3074                           skb->data, size, rx_complete, skb);
3075
3076         spin_lock_irqsave(&dev->rxq.lock, lockflags);
3077
3078         if (netif_device_present(dev->net) &&
3079             netif_running(dev->net) &&
3080             !test_bit(EVENT_RX_HALT, &dev->flags) &&
3081             !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
3082                 ret = usb_submit_urb(urb, GFP_ATOMIC);
3083                 switch (ret) {
3084                 case 0:
3085                         lan78xx_queue_skb(&dev->rxq, skb, rx_start);
3086                         break;
3087                 case -EPIPE:
3088                         lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3089                         break;
3090                 case -ENODEV:
3091                         netif_dbg(dev, ifdown, dev->net, "device gone\n");
3092                         netif_device_detach(dev->net);
3093                         break;
3094                 case -EHOSTUNREACH:
3095                         ret = -ENOLINK;
3096                         break;
3097                 default:
3098                         netif_dbg(dev, rx_err, dev->net,
3099                                   "rx submit, %d\n", ret);
3100                         tasklet_schedule(&dev->bh);
3101                 }
3102         } else {
3103                 netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
3104                 ret = -ENOLINK;
3105         }
3106         spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
3107         if (ret) {
3108                 dev_kfree_skb_any(skb);
3109                 usb_free_urb(urb);
3110         }
3111         return ret;
3112 }
3113
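     /* Bulk-in URB completion handler.  Classify urb->status, update error
      * counters, hand the skb to the bottom half through defer_bh() and
      * resubmit the URB right away unless RX is halted, the interface is
      * down or the URB is being unlinked.
      */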
3114 static void rx_complete(struct urb *urb)
3115 {
3116         struct sk_buff  *skb = (struct sk_buff *)urb->context;
3117         struct skb_data *entry = (struct skb_data *)skb->cb;
3118         struct lan78xx_net *dev = entry->dev;
3119         int urb_status = urb->status;
3120         enum skb_state state;
3121
3122         skb_put(skb, urb->actual_length);
3123         state = rx_done;
3124         entry->urb = NULL;
3125
3126         switch (urb_status) {
3127         case 0:
3128                 if (skb->len < dev->net->hard_header_len) {
3129                         state = rx_cleanup;
3130                         dev->net->stats.rx_errors++;
3131                         dev->net->stats.rx_length_errors++;
3132                         netif_dbg(dev, rx_err, dev->net,
3133                                   "rx length %d\n", skb->len);
3134                 }
3135                 usb_mark_last_busy(dev->udev);
3136                 break;
3137         case -EPIPE:
3138                 dev->net->stats.rx_errors++;
3139                 lan78xx_defer_kevent(dev, EVENT_RX_HALT);
3140                 /* FALLTHROUGH */
3141         case -ECONNRESET:                               /* async unlink */
3142         case -ESHUTDOWN:                                /* hardware gone */
3143                 netif_dbg(dev, ifdown, dev->net,
3144                           "rx shutdown, code %d\n", urb_status);
3145                 state = rx_cleanup;
3146                 entry->urb = urb;
3147                 urb = NULL;
3148                 break;
3149         case -EPROTO:
3150         case -ETIME:
3151         case -EILSEQ:
3152                 dev->net->stats.rx_errors++;
3153                 state = rx_cleanup;
3154                 entry->urb = urb;
3155                 urb = NULL;
3156                 break;
3157
3158         /* data overrun ... flush fifo? */
3159         case -EOVERFLOW:
3160                 dev->net->stats.rx_over_errors++;
3161                 /* FALLTHROUGH */
3162
3163         default:
3164                 state = rx_cleanup;
3165                 dev->net->stats.rx_errors++;
3166                 netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
3167                 break;
3168         }
3169
3170         state = defer_bh(dev, skb, &dev->rxq, state);
3171
3172         if (urb) {
3173                 if (netif_running(dev->net) &&
3174                     !test_bit(EVENT_RX_HALT, &dev->flags) &&
3175                     state != unlink_start) {
3176                         rx_submit(dev, urb, GFP_ATOMIC);
3177                         return;
3178                 }
3179                 usb_free_urb(urb);
3180         }
3181         netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
3182 }
3183
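     /* TX bottom half: coalesce pending skbs from txq_pend into a single
      * bulk-out buffer, each copied at a 4-byte aligned offset (a GSO skb is
      * sent on its own), then build and submit the TX URB.
      */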
3184 static void lan78xx_tx_bh(struct lan78xx_net *dev)
3185 {
3186         int length;
3187         struct urb *urb = NULL;
3188         struct skb_data *entry;
3189         unsigned long flags;
3190         struct sk_buff_head *tqp = &dev->txq_pend;
3191         struct sk_buff *skb, *skb2;
3192         int ret;
3193         int count, pos;
3194         int skb_totallen, pkt_cnt;
3195
3196         skb_totallen = 0;
3197         pkt_cnt = 0;
3198         count = 0;
3199         length = 0;
3200         for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3201                 if (skb_is_gso(skb)) {
3202                         if (pkt_cnt) {
3203                                 /* handle previous packets first */
3204                                 break;
3205                         }
3206                         count = 1;
3207                         length = skb->len - TX_OVERHEAD;
3208                         skb2 = skb_dequeue(tqp);
3209                         goto gso_skb;
3210                 }
3211
3212                 if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
3213                         break;
3214                 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3215                 pkt_cnt++;
3216         }
3217
3218         /* copy to a single skb */
3219         skb = alloc_skb(skb_totallen, GFP_ATOMIC);
3220         if (!skb)
3221                 goto drop;
3222
3223         skb_put(skb, skb_totallen);
3224
3225         for (count = pos = 0; count < pkt_cnt; count++) {
3226                 skb2 = skb_dequeue(tqp);
3227                 if (skb2) {
3228                         length += (skb2->len - TX_OVERHEAD);
3229                         memcpy(skb->data + pos, skb2->data, skb2->len);
3230                         pos += roundup(skb2->len, sizeof(u32));
3231