/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *		Rudi Cilibrasi	:	Pass the right thing to
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain :	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *	       sure which should go first, but I bet it won't make much
 *	       difference if we are running VLANs.  The good news is that
 *	       this protocol won't be in the list unless compiled in, so
 *	       the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
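/*
 * Illustrative sketch of the hashing described above (not part of the
 * original source): the bucket is just the low nibble of the host-order
 * protocol value, which is what ptype_head() below computes for anything
 * other than ETH_P_ALL, e.g.
 *
 *	0x0800 (IP)   & PTYPE_HASH_MASK == 0x0
 *	0x0005 (SNAP) & PTYPE_HASH_MASK == 0x5
 *	0x0805 (X.25) & PTYPE_HASH_MASK == 0x5
 *	0x8035 (RARP) & PTYPE_HASH_MASK == 0x5
 *
 * hence the RARP/SNAP/X.25 overlap mentioned in the comment.
 */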
static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
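/*
 * Illustrative sketch of the locking rules above (not part of the original
 * source): a pure reader may walk the list either under dev_base_lock or
 * under RCU, e.g.
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		inspect_read_only(dev);		// hypothetical helper
 *	rcu_read_unlock();
 *
 * while a writer holds the rtnl lock for the whole update and additionally
 * takes write_lock_bh(&dev_base_lock) around the actual list manipulation,
 * as list_netdevice()/unlist_netdevice() below do.
 */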
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
277 static RAW_NOTIFIER_HEAD(netdev_chain);
280 * Device drivers call our routines to queue packets here. We empty the
281 * queue in the local softnet handler.
284 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
285 EXPORT_PER_CPU_SYMBOL(softnet_data);
287 #ifdef CONFIG_LOCKDEP
289 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
290 * according to dev->type
292 static const unsigned short netdev_lock_type[] =
293 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
294 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
295 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
296 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
297 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
298 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
299 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
300 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
301 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
302 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
303 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
304 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
305 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
306 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
307 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
309 static const char *const netdev_lock_name[] =
310 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
311 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
312 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
313 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
314 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
315 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
316 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
317 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
318 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
319 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
320 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
321 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
322 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
323 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
324 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
326 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
327 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
329 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
333 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
334 if (netdev_lock_type[i] == dev_type)
336 /* the last key is used by default */
337 return ARRAY_SIZE(netdev_lock_type) - 1;
340 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
341 unsigned short dev_type)
345 i = netdev_lock_pos(dev_type);
346 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
347 netdev_lock_name[i]);
350 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
354 i = netdev_lock_pos(dev->type);
355 lockdep_set_class_and_name(&dev->addr_list_lock,
356 &netdev_addr_lock_key[i],
357 netdev_lock_name[i]);
360 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
361 unsigned short dev_type)
364 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
369 /*******************************************************************************
371 Protocol management and registration routines
373 *******************************************************************************/
376 * Add a protocol ID to the list. Now that the input handler is
377 * smarter we can dispense with all the messy stuff that used to be
380 * BEWARE!!! Protocol handlers, mangling input packets,
381 * MUST BE last in hash buckets and checking protocol handlers
382 * MUST start from promiscuous ptype_all chain in net_bh.
383 * It is true now, do not change it.
384 * Explanation follows: if protocol handler, mangling packet, will
385 * be the first on list, it is not able to sense, that packet
386 * is cloned and should be copied-on-write, so that it will
387 * change it and subsequent readers will get broken packet.
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs currently in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
423 * __dev_remove_pack - remove packet handler
424 * @pt: packet type declaration
426 * Remove a protocol handler that was previously added to the kernel
427 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
428 * from the kernel lists and can be freed or reused once this function
431 * The packet type might still be in use by receivers
432 * and must not be freed until after all the CPU's have gone
433 * through a quiescent state.
435 void __dev_remove_pack(struct packet_type *pt)
437 struct list_head *head = ptype_head(pt);
438 struct packet_type *pt1;
440 spin_lock(&ptype_lock);
442 list_for_each_entry(pt1, head, list) {
444 list_del_rcu(&pt->list);
449 pr_warn("dev_remove_pack: %p not found\n", pt);
451 spin_unlock(&ptype_lock);
453 EXPORT_SYMBOL(__dev_remove_pack);
456 * dev_remove_pack - remove packet handler
457 * @pt: packet type declaration
459 * Remove a protocol handler that was previously added to the kernel
460 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
461 * from the kernel lists and can be freed or reused once this function
464 * This call sleeps to guarantee that no CPU is looking at the packet
467 void dev_remove_pack(struct packet_type *pt)
469 __dev_remove_pack(pt);
473 EXPORT_SYMBOL(dev_remove_pack);
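/*
 * Illustrative sketch (an assumption about typical usage, not part of the
 * original source): a hypothetical protocol module registers a handler for
 * its ethertype on load and removes it on unload:
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		// ... consume the frame ...
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type	= cpu_to_be16(0x88b5),	// hypothetical ethertype
 *		.func	= my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);		// module init
 *	dev_remove_pack(&my_packet_type);	// module exit, may sleep
 */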
477 * dev_add_offload - register offload handlers
478 * @po: protocol offload declaration
480 * Add protocol offload handlers to the networking stack. The passed
481 * &proto_offload is linked into kernel lists and may not be freed until
482 * it has been removed from the kernel lists.
 *	This call does not sleep and therefore cannot guarantee that all
 *	CPUs currently in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
488 void dev_add_offload(struct packet_offload *po)
490 struct list_head *head = &offload_base;
492 spin_lock(&offload_lock);
493 list_add_rcu(&po->list, head);
494 spin_unlock(&offload_lock);
496 EXPORT_SYMBOL(dev_add_offload);
499 * __dev_remove_offload - remove offload handler
500 * @po: packet offload declaration
502 * Remove a protocol offload handler that was previously added to the
503 * kernel offload handlers by dev_add_offload(). The passed &offload_type
504 * is removed from the kernel lists and can be freed or reused once this
507 * The packet type might still be in use by receivers
508 * and must not be freed until after all the CPU's have gone
509 * through a quiescent state.
511 void __dev_remove_offload(struct packet_offload *po)
513 struct list_head *head = &offload_base;
514 struct packet_offload *po1;
516 spin_lock(&offload_lock);
518 list_for_each_entry(po1, head, list) {
520 list_del_rcu(&po->list);
525 pr_warn("dev_remove_offload: %p not found\n", po);
527 spin_unlock(&offload_lock);
529 EXPORT_SYMBOL(__dev_remove_offload);
532 * dev_remove_offload - remove packet offload handler
533 * @po: packet offload declaration
535 * Remove a packet offload handler that was previously added to the kernel
536 * offload handlers by dev_add_offload(). The passed &offload_type is
537 * removed from the kernel lists and can be freed or reused once this
540 * This call sleeps to guarantee that no CPU is looking at the packet
543 void dev_remove_offload(struct packet_offload *po)
545 __dev_remove_offload(po);
549 EXPORT_SYMBOL(dev_remove_offload);
551 /******************************************************************************
553 Device Boot-time Settings Routines
555 *******************************************************************************/
557 /* Boot time configuration table */
558 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
561 * netdev_boot_setup_add - add new setup entry
562 * @name: name of the device
563 * @map: configured settings for the device
565 * Adds new setup entry to the dev_boot_setup list. The function
566 * returns 0 on error and 1 on success. This is a generic routine to
569 static int netdev_boot_setup_add(char *name, struct ifmap *map)
571 struct netdev_boot_setup *s;
575 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
576 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
577 memset(s[i].name, 0, sizeof(s[i].name));
578 strlcpy(s[i].name, name, IFNAMSIZ);
579 memcpy(&s[i].map, map, sizeof(s[i].map));
584 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
588 * netdev_boot_setup_check - check boot time settings
589 * @dev: the netdevice
591 * Check boot time settings for the device.
592 * The found settings are set for the device to be used
593 * later in the device probing.
594 * Returns 0 if no settings found, 1 if they are.
596 int netdev_boot_setup_check(struct net_device *dev)
598 struct netdev_boot_setup *s = dev_boot_setup;
601 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
602 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
603 !strcmp(dev->name, s[i].name)) {
604 dev->irq = s[i].map.irq;
605 dev->base_addr = s[i].map.base_addr;
606 dev->mem_start = s[i].map.mem_start;
607 dev->mem_end = s[i].map.mem_end;
613 EXPORT_SYMBOL(netdev_boot_setup_check);
617 * netdev_boot_base - get address from boot time settings
618 * @prefix: prefix for network device
619 * @unit: id for network device
621 * Check boot time settings for the base address of device.
622 * The found settings are set for the device to be used
623 * later in the device probing.
624 * Returns 0 if no settings found.
626 unsigned long netdev_boot_base(const char *prefix, int unit)
628 const struct netdev_boot_setup *s = dev_boot_setup;
632 sprintf(name, "%s%d", prefix, unit);
635 * If device already registered then return base of 1
636 * to indicate not to probe for this interface
638 if (__dev_get_by_name(&init_net, name))
641 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
642 if (!strcmp(name, s[i].name))
643 return s[i].map.base_addr;
648 * Saves at boot time configured settings for any netdevice.
650 int __init netdev_boot_setup(char *str)
655 str = get_options(str, ARRAY_SIZE(ints), ints);
660 memset(&map, 0, sizeof(map));
664 map.base_addr = ints[2];
666 map.mem_start = ints[3];
668 map.mem_end = ints[4];
670 /* Add new entry to the list */
671 return netdev_boot_setup_add(str, &map);
674 __setup("netdev=", netdev_boot_setup);
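/*
 * Illustrative example of the boot parameter parsed above (an assumption
 * based on the ints[] indexing in netdev_boot_setup(): irq, I/O base,
 * memory start, memory end, then the interface name):
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 */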
676 /*******************************************************************************
678 Device Interface Subroutines
680 *******************************************************************************/
683 * __dev_get_by_name - find a device by its name
684 * @net: the applicable net namespace
685 * @name: name to find
687 * Find an interface by name. Must be called under RTNL semaphore
688 * or @dev_base_lock. If the name is found a pointer to the device
689 * is returned. If the name is not found then %NULL is returned. The
690 * reference counters are not incremented so the caller must be
691 * careful with locks.
694 struct net_device *__dev_get_by_name(struct net *net, const char *name)
696 struct hlist_node *p;
697 struct net_device *dev;
698 struct hlist_head *head = dev_name_hash(net, name);
700 hlist_for_each_entry(dev, p, head, name_hlist)
701 if (!strncmp(dev->name, name, IFNAMSIZ))
706 EXPORT_SYMBOL(__dev_get_by_name);
709 * dev_get_by_name_rcu - find a device by its name
710 * @net: the applicable net namespace
711 * @name: name to find
713 * Find an interface by name.
714 * If the name is found a pointer to the device is returned.
715 * If the name is not found then %NULL is returned.
716 * The reference counters are not incremented so the caller must be
717 * careful with locks. The caller must hold RCU lock.
720 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
722 struct hlist_node *p;
723 struct net_device *dev;
724 struct hlist_head *head = dev_name_hash(net, name);
726 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
727 if (!strncmp(dev->name, name, IFNAMSIZ))
732 EXPORT_SYMBOL(dev_get_by_name_rcu);
735 * dev_get_by_name - find a device by its name
736 * @net: the applicable net namespace
737 * @name: name to find
739 * Find an interface by name. This can be called from any
740 * context and does its own locking. The returned handle has
741 * the usage count incremented and the caller must use dev_put() to
742 * release it when it is no longer needed. %NULL is returned if no
743 * matching device is found.
746 struct net_device *dev_get_by_name(struct net *net, const char *name)
748 struct net_device *dev;
751 dev = dev_get_by_name_rcu(net, name);
757 EXPORT_SYMBOL(dev_get_by_name);
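/*
 * Illustrative sketch (not part of the original source): a caller that
 * cannot stay inside an RCU read-side section uses the reference-taking
 * variant and drops the reference when done:
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		// ... use dev ...
 *		dev_put(dev);
 *	}
 */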
760 * __dev_get_by_index - find a device by its ifindex
761 * @net: the applicable net namespace
762 * @ifindex: index of device
764 * Search for an interface by index. Returns %NULL if the device
765 * is not found or a pointer to the device. The device has not
766 * had its reference counter increased so the caller must be careful
767 * about locking. The caller must hold either the RTNL semaphore
771 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
773 struct hlist_node *p;
774 struct net_device *dev;
775 struct hlist_head *head = dev_index_hash(net, ifindex);
777 hlist_for_each_entry(dev, p, head, index_hlist)
778 if (dev->ifindex == ifindex)
783 EXPORT_SYMBOL(__dev_get_by_index);
786 * dev_get_by_index_rcu - find a device by its ifindex
787 * @net: the applicable net namespace
788 * @ifindex: index of device
790 * Search for an interface by index. Returns %NULL if the device
791 * is not found or a pointer to the device. The device has not
792 * had its reference counter increased so the caller must be careful
793 * about locking. The caller must hold RCU lock.
796 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
798 struct hlist_node *p;
799 struct net_device *dev;
800 struct hlist_head *head = dev_index_hash(net, ifindex);
802 hlist_for_each_entry_rcu(dev, p, head, index_hlist)
803 if (dev->ifindex == ifindex)
808 EXPORT_SYMBOL(dev_get_by_index_rcu);
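/*
 * Illustrative sketch (not part of the original source): the _rcu lookup is
 * cheap, but the returned pointer is only valid inside the read-side
 * critical section:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_index_rcu(net, ifindex);
 *	if (dev)
 *		strlcpy(name, dev->name, IFNAMSIZ);
 *	rcu_read_unlock();
 */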
812 * dev_get_by_index - find a device by its ifindex
813 * @net: the applicable net namespace
814 * @ifindex: index of device
816 * Search for an interface by index. Returns NULL if the device
817 * is not found or a pointer to the device. The device returned has
818 * had a reference added and the pointer is safe until the user calls
819 * dev_put to indicate they have finished with it.
822 struct net_device *dev_get_by_index(struct net *net, int ifindex)
824 struct net_device *dev;
827 dev = dev_get_by_index_rcu(net, ifindex);
833 EXPORT_SYMBOL(dev_get_by_index);
836 * dev_getbyhwaddr_rcu - find a device by its hardware address
837 * @net: the applicable net namespace
838 * @type: media type of device
839 * @ha: hardware address
841 * Search for an interface by MAC address. Returns NULL if the device
842 * is not found or a pointer to the device.
843 * The caller must hold RCU or RTNL.
844 * The returned device has not had its ref count increased
845 * and the caller must therefore be careful about locking
849 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
852 struct net_device *dev;
854 for_each_netdev_rcu(net, dev)
855 if (dev->type == type &&
856 !memcmp(dev->dev_addr, ha, dev->addr_len))
861 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
863 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
865 struct net_device *dev;
868 for_each_netdev(net, dev)
869 if (dev->type == type)
874 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
876 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
878 struct net_device *dev, *ret = NULL;
881 for_each_netdev_rcu(net, dev)
882 if (dev->type == type) {
890 EXPORT_SYMBOL(dev_getfirstbyhwtype);
893 * dev_get_by_flags_rcu - find any device with given flags
894 * @net: the applicable net namespace
895 * @if_flags: IFF_* values
896 * @mask: bitmask of bits in if_flags to check
898 * Search for any interface with the given flags. Returns NULL if a device
899 * is not found or a pointer to the device. Must be called inside
900 * rcu_read_lock(), and result refcount is unchanged.
903 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
906 struct net_device *dev, *ret;
909 for_each_netdev_rcu(net, dev) {
910 if (((dev->flags ^ if_flags) & mask) == 0) {
917 EXPORT_SYMBOL(dev_get_by_flags_rcu);
920 * dev_valid_name - check if name is okay for network device
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
927 bool dev_valid_name(const char *name)
931 if (strlen(name) >= IFNAMSIZ)
933 if (!strcmp(name, ".") || !strcmp(name, ".."))
937 if (*name == '/' || isspace(*name))
943 EXPORT_SYMBOL(dev_valid_name);
946 * __dev_alloc_name - allocate a name for a device
947 * @net: network namespace to allocate the device name in
948 * @name: name format string
949 * @buf: scratch buffer and result name string
951 * Passed a format string - eg "lt%d" it will try and find a suitable
952 * id. It scans list of devices to build up a free map, then chooses
953 * the first empty slot. The caller must hold the dev_base or rtnl lock
954 * while allocating the name and adding the device in order to avoid
956 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
957 * Returns the number of the unit assigned or a negative errno code.
960 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
964 const int max_netdevices = 8*PAGE_SIZE;
965 unsigned long *inuse;
966 struct net_device *d;
968 p = strnchr(name, IFNAMSIZ-1, '%');
971 * Verify the string as this thing may have come from
972 * the user. There must be either one "%d" and no other "%"
975 if (p[1] != 'd' || strchr(p + 2, '%'))
978 /* Use one page as a bit array of possible slots */
979 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
983 for_each_netdev(net, d) {
984 if (!sscanf(d->name, name, &i))
986 if (i < 0 || i >= max_netdevices)
989 /* avoid cases where sscanf is not exact inverse of printf */
990 snprintf(buf, IFNAMSIZ, name, i);
991 if (!strncmp(buf, d->name, IFNAMSIZ))
995 i = find_first_zero_bit(inuse, max_netdevices);
996 free_page((unsigned long) inuse);
1000 snprintf(buf, IFNAMSIZ, name, i);
1001 if (!__dev_get_by_name(net, buf))
1004 /* It is possible to run out of possible slots
1005 * when the name is long and there isn't enough space left
1006 * for the digits, or if all bits are used.
1012 * dev_alloc_name - allocate a name for a device
1014 * @name: name format string
1016 * Passed a format string - eg "lt%d" it will try and find a suitable
1017 * id. It scans list of devices to build up a free map, then chooses
1018 * the first empty slot. The caller must hold the dev_base or rtnl lock
1019 * while allocating the name and adding the device in order to avoid
1021 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1022 * Returns the number of the unit assigned or a negative errno code.
1025 int dev_alloc_name(struct net_device *dev, const char *name)
1031 BUG_ON(!dev_net(dev));
1033 ret = __dev_alloc_name(net, name, buf);
1035 strlcpy(dev->name, buf, IFNAMSIZ);
1038 EXPORT_SYMBOL(dev_alloc_name);
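/*
 * Illustrative sketch (not part of the original source): a driver passing a
 * "%d" template gets the lowest free unit for that prefix:
 *
 *	err = dev_alloc_name(dev, "eth%d");	// e.g. picks "eth0"
 *	if (err < 0)
 *		return err;			// negative errno on failure
 */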
1040 static int dev_alloc_name_ns(struct net *net,
1041 struct net_device *dev,
1047 ret = __dev_alloc_name(net, name, buf);
1049 strlcpy(dev->name, buf, IFNAMSIZ);
1053 static int dev_get_valid_name(struct net *net,
1054 struct net_device *dev,
1059 if (!dev_valid_name(name))
1062 if (strchr(name, '%'))
1063 return dev_alloc_name_ns(net, dev, name);
1064 else if (__dev_get_by_name(net, name))
1066 else if (dev->name != name)
1067 strlcpy(dev->name, name, IFNAMSIZ);
1073 * dev_change_name - change name of a device
1075 * @newname: name (or format string) must be at least IFNAMSIZ
1077 * Change name of a device, can pass format strings "eth%d".
1080 int dev_change_name(struct net_device *dev, const char *newname)
1082 char oldname[IFNAMSIZ];
1088 BUG_ON(!dev_net(dev));
1091 if (dev->flags & IFF_UP)
1094 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
1097 memcpy(oldname, dev->name, IFNAMSIZ);
1099 err = dev_get_valid_name(net, dev, newname);
1104 ret = device_rename(&dev->dev, dev->name);
1106 memcpy(dev->name, oldname, IFNAMSIZ);
1110 write_lock_bh(&dev_base_lock);
1111 hlist_del_rcu(&dev->name_hlist);
1112 write_unlock_bh(&dev_base_lock);
1116 write_lock_bh(&dev_base_lock);
1117 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1118 write_unlock_bh(&dev_base_lock);
1120 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1121 ret = notifier_to_errno(ret);
1124 /* err >= 0 after dev_alloc_name() or stores the first errno */
1127 memcpy(dev->name, oldname, IFNAMSIZ);
1130 pr_err("%s: name change rollback failed: %d\n",
1139 * dev_set_alias - change ifalias of a device
1141 * @alias: name up to IFALIASZ
1142 * @len: limit of bytes to copy from info
1144 * Set ifalias for a device,
1146 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1152 if (len >= IFALIASZ)
1157 kfree(dev->ifalias);
1158 dev->ifalias = NULL;
1163 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1166 dev->ifalias = new_ifalias;
1168 strlcpy(dev->ifalias, alias, len+1);
1174 * netdev_features_change - device changes features
1175 * @dev: device to cause notification
1177 * Called to indicate a device has changed features.
1179 void netdev_features_change(struct net_device *dev)
1181 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1183 EXPORT_SYMBOL(netdev_features_change);
1186 * netdev_state_change - device changes state
1187 * @dev: device to cause notification
1189 * Called to indicate a device has changed state. This function calls
1190 * the notifier chains for netdev_chain and sends a NEWLINK message
1191 * to the routing socket.
1193 void netdev_state_change(struct net_device *dev)
1195 if (dev->flags & IFF_UP) {
1196 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1197 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1200 EXPORT_SYMBOL(netdev_state_change);
1203 * netdev_notify_peers - notify network peers about existence of @dev
1204 * @dev: network device
1206 * Generate traffic such that interested network peers are aware of
1207 * @dev, such as by generating a gratuitous ARP. This may be used when
1208 * a device wants to inform the rest of the network about some sort of
1209 * reconfiguration such as a failover event or virtual machine
1212 void netdev_notify_peers(struct net_device *dev)
1215 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1218 EXPORT_SYMBOL(netdev_notify_peers);
1221 * dev_load - load a network module
1222 * @net: the applicable net namespace
1223 * @name: name of interface
1225 * If a network interface is not present and the process has suitable
1226 * privileges this function loads the module. If module loading is not
1227 * available in this kernel then it becomes a nop.
1230 void dev_load(struct net *net, const char *name)
1232 struct net_device *dev;
1236 dev = dev_get_by_name_rcu(net, name);
1240 if (no_module && capable(CAP_NET_ADMIN))
1241 no_module = request_module("netdev-%s", name);
1242 if (no_module && capable(CAP_SYS_MODULE)) {
1243 if (!request_module("%s", name))
1244 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1248 EXPORT_SYMBOL(dev_load);
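/*
 * Illustrative example (an assumption about typical usage, not part of the
 * original source): a driver that wants dev_load() to be able to create its
 * interface by name declares a "netdev-" module alias, e.g.
 *
 *	MODULE_ALIAS("netdev-ppp0");
 *
 * so that dev_load(net, "ppp0") resolves via request_module("netdev-ppp0")
 * without requiring CAP_SYS_MODULE in the caller.
 */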
1250 static int __dev_open(struct net_device *dev)
1252 const struct net_device_ops *ops = dev->netdev_ops;
1257 if (!netif_device_present(dev))
1260 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1261 ret = notifier_to_errno(ret);
1265 set_bit(__LINK_STATE_START, &dev->state);
1267 if (ops->ndo_validate_addr)
1268 ret = ops->ndo_validate_addr(dev);
1270 if (!ret && ops->ndo_open)
1271 ret = ops->ndo_open(dev);
1274 clear_bit(__LINK_STATE_START, &dev->state);
1276 dev->flags |= IFF_UP;
1277 net_dmaengine_get();
1278 dev_set_rx_mode(dev);
1280 add_device_randomness(dev->dev_addr, dev->addr_len);
1287 * dev_open - prepare an interface for use.
1288 * @dev: device to open
1290 * Takes a device from down to up state. The device's private open
1291 * function is invoked and then the multicast lists are loaded. Finally
1292 * the device is moved into the up state and a %NETDEV_UP message is
1293 * sent to the netdev notifier chain.
1295 * Calling this function on an active interface is a nop. On a failure
1296 * a negative errno code is returned.
1298 int dev_open(struct net_device *dev)
1302 if (dev->flags & IFF_UP)
1305 ret = __dev_open(dev);
1309 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1310 call_netdevice_notifiers(NETDEV_UP, dev);
1314 EXPORT_SYMBOL(dev_open);
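/*
 * Illustrative sketch (not part of the original source): dev_open() is
 * called with RTNL held, typically right after a name or index lookup:
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(net, "eth0");
 *	if (dev)
 *		err = dev_open(dev);
 *	rtnl_unlock();
 */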
1316 static int __dev_close_many(struct list_head *head)
1318 struct net_device *dev;
1323 list_for_each_entry(dev, head, unreg_list) {
1324 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1326 clear_bit(__LINK_STATE_START, &dev->state);
1328 /* Synchronize to scheduled poll. We cannot touch poll list, it
1329 * can be even on different cpu. So just clear netif_running().
	 * dev->stop() will invoke napi_disable() on all of its
1332 * napi_struct instances on this device.
1334 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1337 dev_deactivate_many(head);
1339 list_for_each_entry(dev, head, unreg_list) {
1340 const struct net_device_ops *ops = dev->netdev_ops;
1343 * Call the device specific close. This cannot fail.
1344 * Only if device is UP
1346 * We allow it to be called even after a DETACH hot-plug
1352 dev->flags &= ~IFF_UP;
1353 net_dmaengine_put();
1359 static int __dev_close(struct net_device *dev)
1364 list_add(&dev->unreg_list, &single);
1365 retval = __dev_close_many(&single);
1370 static int dev_close_many(struct list_head *head)
1372 struct net_device *dev, *tmp;
1373 LIST_HEAD(tmp_list);
1375 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1376 if (!(dev->flags & IFF_UP))
1377 list_move(&dev->unreg_list, &tmp_list);
1379 __dev_close_many(head);
1381 list_for_each_entry(dev, head, unreg_list) {
1382 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1383 call_netdevice_notifiers(NETDEV_DOWN, dev);
1386 /* rollback_registered_many needs the complete original list */
1387 list_splice(&tmp_list, head);
1392 * dev_close - shutdown an interface.
1393 * @dev: device to shutdown
1395 * This function moves an active device into down state. A
1396 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1397 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1400 int dev_close(struct net_device *dev)
1402 if (dev->flags & IFF_UP) {
1405 list_add(&dev->unreg_list, &single);
1406 dev_close_many(&single);
1411 EXPORT_SYMBOL(dev_close);
1415 * dev_disable_lro - disable Large Receive Offload on a device
1418 * Disable Large Receive Offload (LRO) on a net device. Must be
1419 * called under RTNL. This is needed if received packets may be
1420 * forwarded to another interface.
1422 void dev_disable_lro(struct net_device *dev)
1425 * If we're trying to disable lro on a vlan device
1426 * use the underlying physical device instead
1428 if (is_vlan_dev(dev))
1429 dev = vlan_dev_real_dev(dev);
1431 dev->wanted_features &= ~NETIF_F_LRO;
1432 netdev_update_features(dev);
1434 if (unlikely(dev->features & NETIF_F_LRO))
1435 netdev_WARN(dev, "failed to disable LRO!\n");
1437 EXPORT_SYMBOL(dev_disable_lro);
1440 static int dev_boot_phase = 1;
1443 * register_netdevice_notifier - register a network notifier block
1446 * Register a notifier to be called when network device events occur.
1447 * The notifier passed is linked into the kernel structures and must
1448 * not be reused until it has been unregistered. A negative errno code
1449 * is returned on a failure.
1451 * When registered all registration and up events are replayed
1452 * to the new notifier to allow device to have a race free
1453 * view of the network device list.
1456 int register_netdevice_notifier(struct notifier_block *nb)
1458 struct net_device *dev;
1459 struct net_device *last;
1464 err = raw_notifier_chain_register(&netdev_chain, nb);
1470 for_each_netdev(net, dev) {
1471 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1472 err = notifier_to_errno(err);
1476 if (!(dev->flags & IFF_UP))
1479 nb->notifier_call(nb, NETDEV_UP, dev);
1490 for_each_netdev(net, dev) {
1494 if (dev->flags & IFF_UP) {
1495 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1496 nb->notifier_call(nb, NETDEV_DOWN, dev);
1498 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1503 raw_notifier_chain_unregister(&netdev_chain, nb);
1506 EXPORT_SYMBOL(register_netdevice_notifier);
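/*
 * Illustrative sketch (not part of the original source): a subsystem that
 * wants to track devices registers a notifier block; as described above,
 * NETDEV_REGISTER and NETDEV_UP events for already-present devices are
 * replayed to it at registration time.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = ptr;
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_notifier = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_notifier);
 */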
1509 * unregister_netdevice_notifier - unregister a network notifier block
1512 * Unregister a notifier previously registered by
 * register_netdevice_notifier(). The notifier is unlinked from the
 * kernel structures and may then be reused. A negative errno code
1515 * is returned on a failure.
 * After unregistering, unregister and down device events are synthesized
1518 * for all devices on the device list to the removed notifier to remove
1519 * the need for special case cleanup code.
1522 int unregister_netdevice_notifier(struct notifier_block *nb)
1524 struct net_device *dev;
1529 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1534 for_each_netdev(net, dev) {
1535 if (dev->flags & IFF_UP) {
1536 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1537 nb->notifier_call(nb, NETDEV_DOWN, dev);
1539 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1546 EXPORT_SYMBOL(unregister_netdevice_notifier);
1549 * call_netdevice_notifiers - call all network notifier blocks
1550 * @val: value passed unmodified to notifier function
1551 * @dev: net_device pointer passed unmodified to notifier function
1553 * Call all network notifier blocks. Parameters and return value
1554 * are as for raw_notifier_call_chain().
1557 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1560 return raw_notifier_call_chain(&netdev_chain, val, dev);
1562 EXPORT_SYMBOL(call_netdevice_notifiers);
1564 static struct static_key netstamp_needed __read_mostly;
1565 #ifdef HAVE_JUMP_LABEL
1566 /* We are not allowed to call static_key_slow_dec() from irq context
1567 * If net_disable_timestamp() is called from irq context, defer the
1568 * static_key_slow_dec() calls.
1570 static atomic_t netstamp_needed_deferred;
1573 void net_enable_timestamp(void)
1575 #ifdef HAVE_JUMP_LABEL
1576 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1580 static_key_slow_dec(&netstamp_needed);
1584 WARN_ON(in_interrupt());
1585 static_key_slow_inc(&netstamp_needed);
1587 EXPORT_SYMBOL(net_enable_timestamp);
1589 void net_disable_timestamp(void)
1591 #ifdef HAVE_JUMP_LABEL
1592 if (in_interrupt()) {
1593 atomic_inc(&netstamp_needed_deferred);
1597 static_key_slow_dec(&netstamp_needed);
1599 EXPORT_SYMBOL(net_disable_timestamp);
1601 static inline void net_timestamp_set(struct sk_buff *skb)
1603 skb->tstamp.tv64 = 0;
1604 if (static_key_false(&netstamp_needed))
1605 __net_timestamp(skb);
1608 #define net_timestamp_check(COND, SKB) \
1609 if (static_key_false(&netstamp_needed)) { \
1610 if ((COND) && !(SKB)->tstamp.tv64) \
1611 __net_timestamp(SKB); \
1614 static int net_hwtstamp_validate(struct ifreq *ifr)
1616 struct hwtstamp_config cfg;
1617 enum hwtstamp_tx_types tx_type;
1618 enum hwtstamp_rx_filters rx_filter;
1619 int tx_type_valid = 0;
1620 int rx_filter_valid = 0;
1622 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1625 if (cfg.flags) /* reserved for future extensions */
1628 tx_type = cfg.tx_type;
1629 rx_filter = cfg.rx_filter;
1632 case HWTSTAMP_TX_OFF:
1633 case HWTSTAMP_TX_ON:
1634 case HWTSTAMP_TX_ONESTEP_SYNC:
1639 switch (rx_filter) {
1640 case HWTSTAMP_FILTER_NONE:
1641 case HWTSTAMP_FILTER_ALL:
1642 case HWTSTAMP_FILTER_SOME:
1643 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1644 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1645 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1646 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1647 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1648 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1649 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1650 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1651 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1652 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1653 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1654 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1655 rx_filter_valid = 1;
1659 if (!tx_type_valid || !rx_filter_valid)
1665 static inline bool is_skb_forwardable(struct net_device *dev,
1666 struct sk_buff *skb)
1670 if (!(dev->flags & IFF_UP))
1673 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1674 if (skb->len <= len)
1677 /* if TSO is enabled, we don't care about the length as the packet
1678 * could be forwarded without being segmented before
1680 if (skb_is_gso(skb))
1687 * dev_forward_skb - loopback an skb to another netif
1689 * @dev: destination network device
1690 * @skb: buffer to forward
1693 * NET_RX_SUCCESS (no congestion)
1694 * NET_RX_DROP (packet was dropped, but freed)
1696 * dev_forward_skb can be used for injecting an skb from the
1697 * start_xmit function of one device into the receive queue
1698 * of another device.
1700 * The receiving device may be in another namespace, so
1701 * we have to clear all information in the skb that could
1702 * impact namespace isolation.
1704 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1706 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1707 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1708 atomic_long_inc(&dev->rx_dropped);
1717 if (unlikely(!is_skb_forwardable(dev, skb))) {
1718 atomic_long_inc(&dev->rx_dropped);
1725 skb->tstamp.tv64 = 0;
1726 skb->pkt_type = PACKET_HOST;
1727 skb->protocol = eth_type_trans(skb, dev);
1731 return netif_rx(skb);
1733 EXPORT_SYMBOL_GPL(dev_forward_skb);
1735 static inline int deliver_skb(struct sk_buff *skb,
1736 struct packet_type *pt_prev,
1737 struct net_device *orig_dev)
1739 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1741 atomic_inc(&skb->users);
1742 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1745 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1747 if (!ptype->af_packet_priv || !skb->sk)
1750 if (ptype->id_match)
1751 return ptype->id_match(ptype, skb->sk);
1752 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1759 * Support routine. Sends outgoing frames to any network
1760 * taps currently in use.
1763 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1765 struct packet_type *ptype;
1766 struct sk_buff *skb2 = NULL;
1767 struct packet_type *pt_prev = NULL;
1770 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1771 /* Never send packets back to the socket
1772 * they originated from - MvS (miquels@drinkel.ow.org)
1774 if ((ptype->dev == dev || !ptype->dev) &&
1775 (!skb_loop_sk(ptype, skb))) {
1777 deliver_skb(skb2, pt_prev, skb->dev);
1782 skb2 = skb_clone(skb, GFP_ATOMIC);
1786 net_timestamp_set(skb2);
1788 /* skb->nh should be correctly
1789 set by sender, so that the second statement is
1790 just protection against buggy protocols.
1792 skb_reset_mac_header(skb2);
1794 if (skb_network_header(skb2) < skb2->data ||
1795 skb2->network_header > skb2->tail) {
1796 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1797 ntohs(skb2->protocol),
1799 skb_reset_network_header(skb2);
1802 skb2->transport_header = skb2->network_header;
1803 skb2->pkt_type = PACKET_OUTGOING;
1808 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1813 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1814 * @dev: Network device
1815 * @txq: number of queues available
1817 * If real_num_tx_queues is changed the tc mappings may no longer be
1818 * valid. To resolve this verify the tc mapping remains valid and if
1819 * not NULL the mapping. With no priorities mapping to this
1820 * offset/count pair it will no longer be used. In the worst case TC0
 *	is invalid nothing can be done so disable priority mappings. It is
 *	expected that drivers will fix this mapping if they can before
1823 * calling netif_set_real_num_tx_queues.
1825 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1828 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1830 /* If TC0 is invalidated disable TC mapping */
1831 if (tc->offset + tc->count > txq) {
1832 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1837 /* Invalidated prio to tc mappings set to TC0 */
1838 for (i = 1; i < TC_BITMASK + 1; i++) {
1839 int q = netdev_get_prio_tc_map(dev, i);
1841 tc = &dev->tc_to_txq[q];
1842 if (tc->offset + tc->count > txq) {
1843 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1845 netdev_set_prio_tc_map(dev, i, 0);
1851 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
1854 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1858 if (txq < 1 || txq > dev->num_tx_queues)
1861 if (dev->reg_state == NETREG_REGISTERED ||
1862 dev->reg_state == NETREG_UNREGISTERING) {
1865 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1871 netif_setup_tc(dev, txq);
1873 if (txq < dev->real_num_tx_queues)
1874 qdisc_reset_all_tx_gt(dev, txq);
1877 dev->real_num_tx_queues = txq;
1880 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1884 * netif_set_real_num_rx_queues - set actual number of RX queues used
1885 * @dev: Network device
1886 * @rxq: Actual number of RX queues
1888 * This must be called either with the rtnl_lock held or before
1889 * registration of the net device. Returns 0 on success, or a
1890 * negative error code. If called before registration, it always
1893 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1897 if (rxq < 1 || rxq > dev->num_rx_queues)
1900 if (dev->reg_state == NETREG_REGISTERED) {
1903 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1909 dev->real_num_rx_queues = rxq;
1912 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1916 * netif_get_num_default_rss_queues - default number of RSS queues
1918 * This routine should set an upper limit on the number of RSS queues
1919 * used by default by multiqueue devices.
1921 int netif_get_num_default_rss_queues(void)
1923 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
1925 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
1927 static inline void __netif_reschedule(struct Qdisc *q)
1929 struct softnet_data *sd;
1930 unsigned long flags;
1932 local_irq_save(flags);
1933 sd = &__get_cpu_var(softnet_data);
1934 q->next_sched = NULL;
1935 *sd->output_queue_tailp = q;
1936 sd->output_queue_tailp = &q->next_sched;
1937 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1938 local_irq_restore(flags);
1941 void __netif_schedule(struct Qdisc *q)
1943 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1944 __netif_reschedule(q);
1946 EXPORT_SYMBOL(__netif_schedule);
1948 void dev_kfree_skb_irq(struct sk_buff *skb)
1950 if (atomic_dec_and_test(&skb->users)) {
1951 struct softnet_data *sd;
1952 unsigned long flags;
1954 local_irq_save(flags);
1955 sd = &__get_cpu_var(softnet_data);
1956 skb->next = sd->completion_queue;
1957 sd->completion_queue = skb;
1958 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1959 local_irq_restore(flags);
1962 EXPORT_SYMBOL(dev_kfree_skb_irq);
1964 void dev_kfree_skb_any(struct sk_buff *skb)
1966 if (in_irq() || irqs_disabled())
1967 dev_kfree_skb_irq(skb);
1971 EXPORT_SYMBOL(dev_kfree_skb_any);
1975 * netif_device_detach - mark device as removed
1976 * @dev: network device
1978 * Mark device as removed from system and therefore no longer available.
1980 void netif_device_detach(struct net_device *dev)
1982 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1983 netif_running(dev)) {
1984 netif_tx_stop_all_queues(dev);
1987 EXPORT_SYMBOL(netif_device_detach);
1990 * netif_device_attach - mark device as attached
1991 * @dev: network device
1993 * Mark device as attached from system and restart if needed.
1995 void netif_device_attach(struct net_device *dev)
1997 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1998 netif_running(dev)) {
1999 netif_tx_wake_all_queues(dev);
2000 __netdev_watchdog_up(dev);
2003 EXPORT_SYMBOL(netif_device_attach);
2005 static void skb_warn_bad_offload(const struct sk_buff *skb)
2007 static const netdev_features_t null_features = 0;
2008 struct net_device *dev = skb->dev;
2009 const char *driver = "";
2011 if (dev && dev->dev.parent)
2012 driver = dev_driver_string(dev->dev.parent);
2014 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2015 "gso_type=%d ip_summed=%d\n",
2016 driver, dev ? &dev->features : &null_features,
2017 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2018 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2019 skb_shinfo(skb)->gso_type, skb->ip_summed);
2023 * Invalidate hardware checksum when packet is to be mangled, and
2024 * complete checksum manually on outgoing path.
2026 int skb_checksum_help(struct sk_buff *skb)
2029 int ret = 0, offset;
2031 if (skb->ip_summed == CHECKSUM_COMPLETE)
2032 goto out_set_summed;
2034 if (unlikely(skb_shinfo(skb)->gso_size)) {
2035 skb_warn_bad_offload(skb);
2039 offset = skb_checksum_start_offset(skb);
2040 BUG_ON(offset >= skb_headlen(skb));
2041 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2043 offset += skb->csum_offset;
2044 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2046 if (skb_cloned(skb) &&
2047 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2048 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2053 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2055 skb->ip_summed = CHECKSUM_NONE;
2059 EXPORT_SYMBOL(skb_checksum_help);
2062 * skb_gso_segment - Perform segmentation on skb.
2063 * @skb: buffer to segment
2064 * @features: features for the output path (see dev->features)
2066 * This function segments the given skb and returns a list of segments.
2068 * It may return NULL if the skb requires no segmentation. This is
2069 * only possible when GSO is used for verifying header integrity.
2071 struct sk_buff *skb_gso_segment(struct sk_buff *skb,
2072 netdev_features_t features)
2074 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2075 struct packet_offload *ptype;
2076 __be16 type = skb->protocol;
2077 int vlan_depth = ETH_HLEN;
2080 while (type == htons(ETH_P_8021Q)) {
2081 struct vlan_hdr *vh;
2083 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2084 return ERR_PTR(-EINVAL);
2086 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2087 type = vh->h_vlan_encapsulated_proto;
2088 vlan_depth += VLAN_HLEN;
2091 skb_reset_mac_header(skb);
2092 skb->mac_len = skb->network_header - skb->mac_header;
2093 __skb_pull(skb, skb->mac_len);
2095 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2096 skb_warn_bad_offload(skb);
2098 if (skb_header_cloned(skb) &&
2099 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2100 return ERR_PTR(err);
2104 list_for_each_entry_rcu(ptype, &offload_base, list) {
2105 if (ptype->type == type && ptype->callbacks.gso_segment) {
2106 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2107 err = ptype->callbacks.gso_send_check(skb);
2108 segs = ERR_PTR(err);
2109 if (err || skb_gso_ok(skb, features))
2111 __skb_push(skb, (skb->data -
2112 skb_network_header(skb)));
2114 segs = ptype->callbacks.gso_segment(skb, features);
2120 __skb_push(skb, skb->data - skb_mac_header(skb));
2124 EXPORT_SYMBOL(skb_gso_segment);
2126 /* Take action when hardware reception checksum errors are detected. */
2128 void netdev_rx_csum_fault(struct net_device *dev)
2130 if (net_ratelimit()) {
2131 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2135 EXPORT_SYMBOL(netdev_rx_csum_fault);
2138 /* Actually, we should eliminate this check as soon as we know, that:
2139 * 1. IOMMU is present and allows to map all the memory.
2140 * 2. No high memory really exists on this machine.
2143 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2145 #ifdef CONFIG_HIGHMEM
2147 if (!(dev->features & NETIF_F_HIGHDMA)) {
2148 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2149 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2150 if (PageHighMem(skb_frag_page(frag)))
2155 if (PCI_DMA_BUS_IS_PHYS) {
2156 struct device *pdev = dev->dev.parent;
2160 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2161 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2162 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2163 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2172 void (*destructor)(struct sk_buff *skb);
2175 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2177 static void dev_gso_skb_destructor(struct sk_buff *skb)
2179 struct dev_gso_cb *cb;
2182 struct sk_buff *nskb = skb->next;
2184 skb->next = nskb->next;
2187 } while (skb->next);
2189 cb = DEV_GSO_CB(skb);
2191 cb->destructor(skb);
2195 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2196 * @skb: buffer to segment
2197 * @features: device features as applicable to this skb
2199 * This function segments the given skb and stores the list of segments
2202 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2204 struct sk_buff *segs;
2206 segs = skb_gso_segment(skb, features);
2208 /* Verifying header integrity only. */
2213 return PTR_ERR(segs);
2216 DEV_GSO_CB(skb)->destructor = skb->destructor;
2217 skb->destructor = dev_gso_skb_destructor;
2222 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2224 return ((features & NETIF_F_GEN_CSUM) ||
2225 ((features & NETIF_F_V4_CSUM) &&
2226 protocol == htons(ETH_P_IP)) ||
2227 ((features & NETIF_F_V6_CSUM) &&
2228 protocol == htons(ETH_P_IPV6)) ||
2229 ((features & NETIF_F_FCOE_CRC) &&
2230 protocol == htons(ETH_P_FCOE)));
2233 static netdev_features_t harmonize_features(struct sk_buff *skb,
2234 __be16 protocol, netdev_features_t features)
2236 if (skb->ip_summed != CHECKSUM_NONE &&
2237 !can_checksum_protocol(features, protocol)) {
2238 features &= ~NETIF_F_ALL_CSUM;
2239 features &= ~NETIF_F_SG;
2240 } else if (illegal_highdma(skb->dev, skb)) {
2241 features &= ~NETIF_F_SG;
2247 netdev_features_t netif_skb_features(struct sk_buff *skb)
2249 __be16 protocol = skb->protocol;
2250 netdev_features_t features = skb->dev->features;
2252 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2253 features &= ~NETIF_F_GSO_MASK;
2255 if (protocol == htons(ETH_P_8021Q)) {
2256 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2257 protocol = veh->h_vlan_encapsulated_proto;
2258 } else if (!vlan_tx_tag_present(skb)) {
2259 return harmonize_features(skb, protocol, features);
2262 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2264 if (protocol != htons(ETH_P_8021Q)) {
2265 return harmonize_features(skb, protocol, features);
2267 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2268 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2269 return harmonize_features(skb, protocol, features);
2272 EXPORT_SYMBOL(netif_skb_features);
2275 * Returns true if either:
2276 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2277 * 2. skb is fragmented and the device does not support SG.
2279 static inline int skb_needs_linearize(struct sk_buff *skb,
2282 return skb_is_nonlinear(skb) &&
2283 ((skb_has_frag_list(skb) &&
2284 !(features & NETIF_F_FRAGLIST)) ||
2285 (skb_shinfo(skb)->nr_frags &&
2286 !(features & NETIF_F_SG)));
2289 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2290 struct netdev_queue *txq)
2292 const struct net_device_ops *ops = dev->netdev_ops;
2293 int rc = NETDEV_TX_OK;
2294 unsigned int skb_len;
2296 if (likely(!skb->next)) {
2297 netdev_features_t features;
2300 * If device doesn't need skb->dst, release it right now while
	 * it is hot in this cpu cache
2303 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2306 features = netif_skb_features(skb);
2308 if (vlan_tx_tag_present(skb) &&
2309 !(features & NETIF_F_HW_VLAN_TX)) {
2310 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2317 if (netif_needs_gso(skb, features)) {
2318 if (unlikely(dev_gso_segment(skb, features)))
2323 if (skb_needs_linearize(skb, features) &&
2324 __skb_linearize(skb))
2327 /* If packet is not checksummed and device does not
2328 * support checksumming for this protocol, complete
2329 * checksumming here.
2331 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2332 skb_set_transport_header(skb,
2333 skb_checksum_start_offset(skb));
2334 if (!(features & NETIF_F_ALL_CSUM) &&
2335 skb_checksum_help(skb))
2340 if (!list_empty(&ptype_all))
2341 dev_queue_xmit_nit(skb, dev);
2344 rc = ops->ndo_start_xmit(skb, dev);
2345 trace_net_dev_xmit(skb, rc, dev, skb_len);
2346 if (rc == NETDEV_TX_OK)
2347 txq_trans_update(txq);
2353 struct sk_buff *nskb = skb->next;
2355 skb->next = nskb->next;
2359 * If device doesn't need nskb->dst, release it right now while
2360 * it's hot in this CPU cache
2362 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2365 if (!list_empty(&ptype_all))
2366 dev_queue_xmit_nit(nskb, dev);
2368 skb_len = nskb->len;
2369 rc = ops->ndo_start_xmit(nskb, dev);
2370 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2371 if (unlikely(rc != NETDEV_TX_OK)) {
2372 if (rc & ~NETDEV_TX_MASK)
2373 goto out_kfree_gso_skb;
2374 nskb->next = skb->next;
2378 txq_trans_update(txq);
2379 if (unlikely(netif_xmit_stopped(txq) && skb->next))
2380 return NETDEV_TX_BUSY;
2381 } while (skb->next);
2384 if (likely(skb->next == NULL))
2385 skb->destructor = DEV_GSO_CB(skb)->destructor;
2392 static u32 hashrnd __read_mostly;
2395 * Returns a Tx hash based on the given packet descriptor and the number of
2396 * Tx queues to be used as a distribution range.
2398 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2399 unsigned int num_tx_queues)
2403 u16 qcount = num_tx_queues;
2405 if (skb_rx_queue_recorded(skb)) {
2406 hash = skb_get_rx_queue(skb);
2407 while (unlikely(hash >= num_tx_queues))
2408 hash -= num_tx_queues;
2413 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2414 qoffset = dev->tc_to_txq[tc].offset;
2415 qcount = dev->tc_to_txq[tc].count;
2418 if (skb->sk && skb->sk->sk_hash)
2419 hash = skb->sk->sk_hash;
2421 hash = (__force u16) skb->protocol;
2422 hash = jhash_1word(hash, hashrnd);
2424 return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2426 EXPORT_SYMBOL(__skb_tx_hash);
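/*
 * Illustrative sketch, not part of the original file: the reciprocal
 * scaling used by __skb_tx_hash() above.  Multiplying a 32-bit hash by
 * the queue count and shifting right by 32 maps the hash uniformly
 * into [0, qcount) without a modulo, and qoffset then selects the
 * traffic class's slice of the queue range.
 */
static u16 example_hash_to_queue(u32 hash, u16 qcount, u16 qoffset)
{
	return (u16)(((u64)hash * qcount) >> 32) + qoffset;
}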
2428 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2430 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2431 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2432 dev->name, queue_index,
2433 dev->real_num_tx_queues);
2439 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2442 struct xps_dev_maps *dev_maps;
2443 struct xps_map *map;
2444 int queue_index = -1;
2447 dev_maps = rcu_dereference(dev->xps_maps);
2449 map = rcu_dereference(
2450 dev_maps->cpu_map[raw_smp_processor_id()]);
2453 queue_index = map->queues[0];
2456 if (skb->sk && skb->sk->sk_hash)
2457 hash = skb->sk->sk_hash;
2459 hash = (__force u16) skb->protocol ^
2461 hash = jhash_1word(hash, hashrnd);
2462 queue_index = map->queues[
2463 ((u64)hash * map->len) >> 32];
2465 if (unlikely(queue_index >= dev->real_num_tx_queues))
2477 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
2478 struct sk_buff *skb)
2481 const struct net_device_ops *ops = dev->netdev_ops;
2483 if (dev->real_num_tx_queues == 1)
2485 else if (ops->ndo_select_queue) {
2486 queue_index = ops->ndo_select_queue(dev, skb);
2487 queue_index = dev_cap_txqueue(dev, queue_index);
2489 struct sock *sk = skb->sk;
2490 queue_index = sk_tx_queue_get(sk);
2492 if (queue_index < 0 || skb->ooo_okay ||
2493 queue_index >= dev->real_num_tx_queues) {
2494 int old_index = queue_index;
2496 queue_index = get_xps_queue(dev, skb);
2497 if (queue_index < 0)
2498 queue_index = skb_tx_hash(dev, skb);
2500 if (queue_index != old_index && sk) {
2501 struct dst_entry *dst =
2502 rcu_dereference_check(sk->sk_dst_cache, 1);
2504 if (dst && skb_dst(skb) == dst)
2505 sk_tx_queue_set(sk, queue_index);
2510 skb_set_queue_mapping(skb, queue_index);
2511 return netdev_get_tx_queue(dev, queue_index);
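/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver-provided ndo_select_queue() hook.  netdev_pick_tx() above
 * prefers this hook when the driver implements it, and the returned
 * index is still clamped by dev_cap_txqueue().  The policy shown
 * (queue 0 for control traffic, hash for the rest) is made up.
 */
static u16 example_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;

	return skb_tx_hash(dev, skb);
}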
2514 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2515 struct net_device *dev,
2516 struct netdev_queue *txq)
2518 spinlock_t *root_lock = qdisc_lock(q);
2522 qdisc_skb_cb(skb)->pkt_len = skb->len;
2523 qdisc_calculate_pkt_len(skb, q);
2525 * Heuristic to force contended enqueues to serialize on a
2526 * separate lock before trying to get qdisc main lock.
2527 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2528 * and dequeue packets faster.
2530 contended = qdisc_is_running(q);
2531 if (unlikely(contended))
2532 spin_lock(&q->busylock);
2534 spin_lock(root_lock);
2535 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2538 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2539 qdisc_run_begin(q)) {
2541 * This is a work-conserving queue; there are no old skbs
2542 * waiting to be sent out; and the qdisc is not running -
2543 * xmit the skb directly.
2545 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2548 qdisc_bstats_update(q, skb);
2550 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2551 if (unlikely(contended)) {
2552 spin_unlock(&q->busylock);
2559 rc = NET_XMIT_SUCCESS;
2562 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2563 if (qdisc_run_begin(q)) {
2564 if (unlikely(contended)) {
2565 spin_unlock(&q->busylock);
2571 spin_unlock(root_lock);
2572 if (unlikely(contended))
2573 spin_unlock(&q->busylock);
2577 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2578 static void skb_update_prio(struct sk_buff *skb)
2580 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2582 if (!skb->priority && skb->sk && map) {
2583 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2585 if (prioidx < map->priomap_len)
2586 skb->priority = map->priomap[prioidx];
2590 #define skb_update_prio(skb)
2593 static DEFINE_PER_CPU(int, xmit_recursion);
2594 #define RECURSION_LIMIT 10
2597 * dev_loopback_xmit - loop back @skb
2598 * @skb: buffer to transmit
2600 int dev_loopback_xmit(struct sk_buff *skb)
2602 skb_reset_mac_header(skb);
2603 __skb_pull(skb, skb_network_offset(skb));
2604 skb->pkt_type = PACKET_LOOPBACK;
2605 skb->ip_summed = CHECKSUM_UNNECESSARY;
2606 WARN_ON(!skb_dst(skb));
2611 EXPORT_SYMBOL(dev_loopback_xmit);
2614 * dev_queue_xmit - transmit a buffer
2615 * @skb: buffer to transmit
2617 * Queue a buffer for transmission to a network device. The caller must
2618 * have set the device and priority and built the buffer before calling
2619 * this function. The function can be called from an interrupt.
2621 * A negative errno code is returned on a failure. A success does not
2622 * guarantee the frame will be transmitted as it may be dropped due
2623 * to congestion or traffic shaping.
2625 * -----------------------------------------------------------------------------------
2626 * I notice this method can also return errors from the queue disciplines,
2627 * including NET_XMIT_DROP, which is a positive value. So, errors can also be positive.
2630 * Regardless of the return value, the skb is consumed, so it is currently
2631 * difficult to retry a send to this method. (You can bump the ref count
2632 * before sending to hold a reference for retry if you are careful.)
2634 * When calling this method, interrupts MUST be enabled. This is because
2635 * the BH enable code must have IRQs enabled so that it will not deadlock.
2638 int dev_queue_xmit(struct sk_buff *skb)
2640 struct net_device *dev = skb->dev;
2641 struct netdev_queue *txq;
2645 /* Disable soft irqs for various locks below. Also
2646 * stops preemption for RCU.
2650 skb_update_prio(skb);
2652 txq = netdev_pick_tx(dev, skb);
2653 q = rcu_dereference_bh(txq->qdisc);
2655 #ifdef CONFIG_NET_CLS_ACT
2656 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2658 trace_net_dev_queue(skb);
2660 rc = __dev_xmit_skb(skb, q, dev, txq);
2664 /* The device has no queue. Common case for software devices:
2665 loopback, all the sorts of tunnels...
2667 Really, it is unlikely that netif_tx_lock protection is necessary
2668 here. (e.g. loopback and IP tunnels are clean ignoring statistics
2670 However, it is possible that they rely on protection
2673 Check this and shoot the lock. It is not prone to deadlocks.
2674 Either shoot the noqueue qdisc - it is even simpler 8)
2676 if (dev->flags & IFF_UP) {
2677 int cpu = smp_processor_id(); /* ok because BHs are off */
2679 if (txq->xmit_lock_owner != cpu) {
2681 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2682 goto recursion_alert;
2684 HARD_TX_LOCK(dev, txq, cpu);
2686 if (!netif_xmit_stopped(txq)) {
2687 __this_cpu_inc(xmit_recursion);
2688 rc = dev_hard_start_xmit(skb, dev, txq);
2689 __this_cpu_dec(xmit_recursion);
2690 if (dev_xmit_complete(rc)) {
2691 HARD_TX_UNLOCK(dev, txq);
2695 HARD_TX_UNLOCK(dev, txq);
2696 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2699 /* Recursion is detected! It is possible,
2703 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2709 rcu_read_unlock_bh();
2714 rcu_read_unlock_bh();
2717 EXPORT_SYMBOL(dev_queue_xmit);
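/*
 * Illustrative sketch, not part of the original file: a minimal caller
 * of dev_queue_xmit().  "example_send" is hypothetical; a real protocol
 * would build a proper link-layer header (e.g. via dev_hard_header())
 * and interpret the NET_XMIT_* return codes.  The caller must hold a
 * reference on @dev and have interrupts enabled.
 */
static int example_send(struct net_device *dev, const void *data, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	memcpy(skb_put(skb, len), data, len);

	skb->dev = dev;
	skb->priority = TC_PRIO_CONTROL;	/* any valid priority */

	/* the skb is consumed regardless of the return value */
	return dev_queue_xmit(skb);
}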
2720 /*=======================================================================
2722 =======================================================================*/
2724 int netdev_max_backlog __read_mostly = 1000;
2725 EXPORT_SYMBOL(netdev_max_backlog);
2727 int netdev_tstamp_prequeue __read_mostly = 1;
2728 int netdev_budget __read_mostly = 300;
2729 int weight_p __read_mostly = 64; /* old backlog weight */
2731 /* Called with irq disabled */
2732 static inline void ____napi_schedule(struct softnet_data *sd,
2733 struct napi_struct *napi)
2735 list_add_tail(&napi->poll_list, &sd->poll_list);
2736 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2740 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2741 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
2742 * on success; zero indicates no valid hash. Also sets l4_rxhash in skb
2743 * if hash is a canonical 4-tuple hash over transport ports.
2745 void __skb_get_rxhash(struct sk_buff *skb)
2747 struct flow_keys keys;
2750 if (!skb_flow_dissect(skb, &keys))
2756 /* get a consistent hash (same value on both flow directions) */
2757 if (((__force u32)keys.dst < (__force u32)keys.src) ||
2758 (((__force u32)keys.dst == (__force u32)keys.src) &&
2759 ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
2760 swap(keys.dst, keys.src);
2761 swap(keys.port16[0], keys.port16[1]);
2764 hash = jhash_3words((__force u32)keys.dst,
2765 (__force u32)keys.src,
2766 (__force u32)keys.ports, hashrnd);
2772 EXPORT_SYMBOL(__skb_get_rxhash);
2776 /* One global table that all flow-based protocols share. */
2777 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2778 EXPORT_SYMBOL(rps_sock_flow_table);
2780 struct static_key rps_needed __read_mostly;
2782 static struct rps_dev_flow *
2783 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2784 struct rps_dev_flow *rflow, u16 next_cpu)
2786 if (next_cpu != RPS_NO_CPU) {
2787 #ifdef CONFIG_RFS_ACCEL
2788 struct netdev_rx_queue *rxqueue;
2789 struct rps_dev_flow_table *flow_table;
2790 struct rps_dev_flow *old_rflow;
2795 /* Should we steer this flow to a different hardware queue? */
2796 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2797 !(dev->features & NETIF_F_NTUPLE))
2799 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2800 if (rxq_index == skb_get_rx_queue(skb))
2803 rxqueue = dev->_rx + rxq_index;
2804 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2807 flow_id = skb->rxhash & flow_table->mask;
2808 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2809 rxq_index, flow_id);
2813 rflow = &flow_table->flows[flow_id];
2815 if (old_rflow->filter == rflow->filter)
2816 old_rflow->filter = RPS_NO_FILTER;
2820 per_cpu(softnet_data, next_cpu).input_queue_head;
2823 rflow->cpu = next_cpu;
2828 * get_rps_cpu is called from netif_receive_skb and returns the target
2829 * CPU from the RPS map of the receiving queue for a given skb.
2830 * rcu_read_lock must be held on entry.
2832 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2833 struct rps_dev_flow **rflowp)
2835 struct netdev_rx_queue *rxqueue;
2836 struct rps_map *map;
2837 struct rps_dev_flow_table *flow_table;
2838 struct rps_sock_flow_table *sock_flow_table;
2842 if (skb_rx_queue_recorded(skb)) {
2843 u16 index = skb_get_rx_queue(skb);
2844 if (unlikely(index >= dev->real_num_rx_queues)) {
2845 WARN_ONCE(dev->real_num_rx_queues > 1,
2846 "%s received packet on queue %u, but number "
2847 "of RX queues is %u\n",
2848 dev->name, index, dev->real_num_rx_queues);
2851 rxqueue = dev->_rx + index;
2855 map = rcu_dereference(rxqueue->rps_map);
2857 if (map->len == 1 &&
2858 !rcu_access_pointer(rxqueue->rps_flow_table)) {
2859 tcpu = map->cpus[0];
2860 if (cpu_online(tcpu))
2864 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2868 skb_reset_network_header(skb);
2869 if (!skb_get_rxhash(skb))
2872 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2873 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2874 if (flow_table && sock_flow_table) {
2876 struct rps_dev_flow *rflow;
2878 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2881 next_cpu = sock_flow_table->ents[skb->rxhash &
2882 sock_flow_table->mask];
2885 * If the desired CPU (where last recvmsg was done) is
2886 * different from current CPU (one in the rx-queue flow
2887 * table entry), switch if one of the following holds:
2888 * - Current CPU is unset (equal to RPS_NO_CPU).
2889 * - Current CPU is offline.
2890 * - The current CPU's queue tail has advanced beyond the
2891 * last packet that was enqueued using this table entry.
2892 * This guarantees that all previous packets for the flow
2893 * have been dequeued, thus preserving in order delivery.
2895 if (unlikely(tcpu != next_cpu) &&
2896 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2897 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2898 rflow->last_qtail)) >= 0)) {
2900 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2903 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2911 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2913 if (cpu_online(tcpu)) {
2923 #ifdef CONFIG_RFS_ACCEL
2926 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2927 * @dev: Device on which the filter was set
2928 * @rxq_index: RX queue index
2929 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2930 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2932 * Drivers that implement ndo_rx_flow_steer() should periodically call
2933 * this function for each installed filter and remove the filters for
2934 * which it returns %true.
2936 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2937 u32 flow_id, u16 filter_id)
2939 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2940 struct rps_dev_flow_table *flow_table;
2941 struct rps_dev_flow *rflow;
2946 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2947 if (flow_table && flow_id <= flow_table->mask) {
2948 rflow = &flow_table->flows[flow_id];
2949 cpu = ACCESS_ONCE(rflow->cpu);
2950 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2951 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2952 rflow->last_qtail) <
2953 (int)(10 * flow_table->mask)))
2959 EXPORT_SYMBOL(rps_may_expire_flow);
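/*
 * Illustrative sketch, not part of the original file: how a driver that
 * implements ndo_rx_flow_steer() might periodically expire stale
 * hardware filters.  The "example_filter" table and its fields are
 * hypothetical; only the rps_may_expire_flow() call follows the API
 * documented above.
 */
struct example_filter {
	u16 rxq_index;
	u32 flow_id;
	u16 filter_id;
	bool in_use;
};

static void example_expire_filters(struct net_device *dev,
				   struct example_filter *tbl, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (rps_may_expire_flow(dev, tbl[i].rxq_index,
					tbl[i].flow_id, tbl[i].filter_id)) {
			/* ... remove the corresponding hardware filter ... */
			tbl[i].in_use = false;
		}
	}
}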
2961 #endif /* CONFIG_RFS_ACCEL */
2963 /* Called from hardirq (IPI) context */
2964 static void rps_trigger_softirq(void *data)
2966 struct softnet_data *sd = data;
2968 ____napi_schedule(sd, &sd->backlog);
2972 #endif /* CONFIG_RPS */
2975 * Check if this softnet_data structure belongs to another CPU.
2976 * If so, queue it on our IPI list and return 1
2979 static int rps_ipi_queued(struct softnet_data *sd)
2982 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2985 sd->rps_ipi_next = mysd->rps_ipi_list;
2986 mysd->rps_ipi_list = sd;
2988 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2991 #endif /* CONFIG_RPS */
2996 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
2997 * queue (which may be a remote CPU's queue).
2999 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3000 unsigned int *qtail)
3002 struct softnet_data *sd;
3003 unsigned long flags;
3005 sd = &per_cpu(softnet_data, cpu);
3007 local_irq_save(flags);
3010 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
3011 if (skb_queue_len(&sd->input_pkt_queue)) {
3013 __skb_queue_tail(&sd->input_pkt_queue, skb);
3014 input_queue_tail_incr_save(sd, qtail);
3016 local_irq_restore(flags);
3017 return NET_RX_SUCCESS;
3020 /* Schedule NAPI for the backlog device.
3021 * We can use a non-atomic operation since we own the queue lock
3023 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3024 if (!rps_ipi_queued(sd))
3025 ____napi_schedule(sd, &sd->backlog);
3033 local_irq_restore(flags);
3035 atomic_long_inc(&skb->dev->rx_dropped);
3041 * netif_rx - post buffer to the network code
3042 * @skb: buffer to post
3044 * This function receives a packet from a device driver and queues it for
3045 * the upper (protocol) levels to process. It always succeeds. The buffer
3046 * may be dropped during processing for congestion control or by the protocol layers.
3050 * NET_RX_SUCCESS (no congestion)
3051 * NET_RX_DROP (packet was dropped)
3055 int netif_rx(struct sk_buff *skb)
3059 /* if netpoll wants it, pretend we never saw it */
3060 if (netpoll_rx(skb))
3063 net_timestamp_check(netdev_tstamp_prequeue, skb);
3065 trace_netif_rx(skb);
3067 if (static_key_false(&rps_needed)) {
3068 struct rps_dev_flow voidflow, *rflow = &voidflow;
3074 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3076 cpu = smp_processor_id();
3078 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3086 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3091 EXPORT_SYMBOL(netif_rx);
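/*
 * Illustrative sketch, not part of the original file: a non-NAPI driver
 * posting a received frame from its interrupt handler.  Names prefixed
 * "example_" are hypothetical; the frame is assumed to have already
 * been copied out of the hardware.
 */
static void example_isr_rx(struct net_device *dev, const void *frame,
			   unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	memcpy(skb_put(skb, len), frame, len);
	skb->protocol = eth_type_trans(skb, dev);

	netif_rx(skb);	/* queues to a per-CPU backlog; always "succeeds" */
}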
3093 int netif_rx_ni(struct sk_buff *skb)
3098 err = netif_rx(skb);
3099 if (local_softirq_pending())
3105 EXPORT_SYMBOL(netif_rx_ni);
3107 static void net_tx_action(struct softirq_action *h)
3109 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3111 if (sd->completion_queue) {
3112 struct sk_buff *clist;
3114 local_irq_disable();
3115 clist = sd->completion_queue;
3116 sd->completion_queue = NULL;
3120 struct sk_buff *skb = clist;
3121 clist = clist->next;
3123 WARN_ON(atomic_read(&skb->users));
3124 trace_kfree_skb(skb, net_tx_action);
3129 if (sd->output_queue) {
3132 local_irq_disable();
3133 head = sd->output_queue;
3134 sd->output_queue = NULL;
3135 sd->output_queue_tailp = &sd->output_queue;
3139 struct Qdisc *q = head;
3140 spinlock_t *root_lock;
3142 head = head->next_sched;
3144 root_lock = qdisc_lock(q);
3145 if (spin_trylock(root_lock)) {
3146 smp_mb__before_clear_bit();
3147 clear_bit(__QDISC_STATE_SCHED,
3150 spin_unlock(root_lock);
3152 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3154 __netif_reschedule(q);
3156 smp_mb__before_clear_bit();
3157 clear_bit(__QDISC_STATE_SCHED,
3165 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3166 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3167 /* This hook is defined here for ATM LANE */
3168 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3169 unsigned char *addr) __read_mostly;
3170 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3173 #ifdef CONFIG_NET_CLS_ACT
3174 /* TODO: Maybe we should just force sch_ingress to be compiled in
3175 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
3176 * instructions (a compare and two stores) when we don't have it on
3177 * but do have CONFIG_NET_CLS_ACT.
3178 * NOTE: This doesn't stop any functionality; if you don't have
3179 * the ingress scheduler, you just can't add policies on ingress.
3182 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3184 struct net_device *dev = skb->dev;
3185 u32 ttl = G_TC_RTTL(skb->tc_verd);
3186 int result = TC_ACT_OK;
3189 if (unlikely(MAX_RED_LOOP < ttl++)) {
3190 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3191 skb->skb_iif, dev->ifindex);
3195 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3196 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3199 if (q != &noop_qdisc) {
3200 spin_lock(qdisc_lock(q));
3201 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3202 result = qdisc_enqueue_root(skb, q);
3203 spin_unlock(qdisc_lock(q));
3209 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3210 struct packet_type **pt_prev,
3211 int *ret, struct net_device *orig_dev)
3213 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3215 if (!rxq || rxq->qdisc == &noop_qdisc)
3219 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3223 switch (ing_filter(skb, rxq)) {
3237 * netdev_rx_handler_register - register receive handler
3238 * @dev: device to register a handler for
3239 * @rx_handler: receive handler to register
3240 * @rx_handler_data: data pointer that is used by rx handler
3242 * Register a receive handler for a device. This handler will then be
3243 * called from __netif_receive_skb. A negative errno code is returned
3246 * The caller must hold the rtnl_mutex.
3248 * For a general description of rx_handler, see enum rx_handler_result.
3250 int netdev_rx_handler_register(struct net_device *dev,
3251 rx_handler_func_t *rx_handler,
3252 void *rx_handler_data)
3256 if (dev->rx_handler)
3259 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3260 rcu_assign_pointer(dev->rx_handler, rx_handler);
3264 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
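/*
 * Illustrative sketch, not part of the original file: how a bridge- or
 * bonding-style module might attach an rx_handler.  "example_handle_frame"
 * and "example_attach" are hypothetical; RTNL must be held across the
 * registration, as documented above.
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return RX_HANDLER_PASS;		/* leave multicast alone */

	/* a real handler might steal the skb and return RX_HANDLER_CONSUMED */
	return RX_HANDLER_PASS;
}

static int example_attach(struct net_device *dev, void *priv)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_handle_frame, priv);
	rtnl_unlock();
	return err;
}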
3267 * netdev_rx_handler_unregister - unregister receive handler
3268 * @dev: device to unregister a handler from
3270 * Unregister a receive handler from a device.
3272 * The caller must hold the rtnl_mutex.
3274 void netdev_rx_handler_unregister(struct net_device *dev)
3278 RCU_INIT_POINTER(dev->rx_handler, NULL);
3279 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3281 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3284 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3285 * the special handling of PFMEMALLOC skbs.
3287 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3289 switch (skb->protocol) {
3290 case __constant_htons(ETH_P_ARP):
3291 case __constant_htons(ETH_P_IP):
3292 case __constant_htons(ETH_P_IPV6):
3293 case __constant_htons(ETH_P_8021Q):
3300 static int __netif_receive_skb(struct sk_buff *skb)
3302 struct packet_type *ptype, *pt_prev;
3303 rx_handler_func_t *rx_handler;
3304 struct net_device *orig_dev;
3305 struct net_device *null_or_dev;
3306 bool deliver_exact = false;
3307 int ret = NET_RX_DROP;
3309 unsigned long pflags = current->flags;
3311 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3313 trace_netif_receive_skb(skb);
3316 * PFMEMALLOC skbs are special, they should
3317 * - be delivered to SOCK_MEMALLOC sockets only
3318 * - stay away from userspace
3319 * - have bounded memory usage
3321 * Use PF_MEMALLOC as this saves us from propagating the allocation
3322 * context down to all allocation sites.
3324 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3325 current->flags |= PF_MEMALLOC;
3327 /* if we've gotten here through NAPI, check netpoll */
3328 if (netpoll_receive_skb(skb))
3331 orig_dev = skb->dev;
3333 skb_reset_network_header(skb);
3334 skb_reset_transport_header(skb);
3335 skb_reset_mac_len(skb);
3342 skb->skb_iif = skb->dev->ifindex;
3344 __this_cpu_inc(softnet_data.processed);
3346 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3347 skb = vlan_untag(skb);
3352 #ifdef CONFIG_NET_CLS_ACT
3353 if (skb->tc_verd & TC_NCLS) {
3354 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3359 if (sk_memalloc_socks() && skb_pfmemalloc(skb))
3362 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3363 if (!ptype->dev || ptype->dev == skb->dev) {
3365 ret = deliver_skb(skb, pt_prev, orig_dev);
3371 #ifdef CONFIG_NET_CLS_ACT
3372 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3378 if (sk_memalloc_socks() && skb_pfmemalloc(skb)
3379 && !skb_pfmemalloc_protocol(skb))
3382 if (vlan_tx_tag_present(skb)) {
3384 ret = deliver_skb(skb, pt_prev, orig_dev);
3387 if (vlan_do_receive(&skb))
3389 else if (unlikely(!skb))
3393 rx_handler = rcu_dereference(skb->dev->rx_handler);
3396 ret = deliver_skb(skb, pt_prev, orig_dev);
3399 switch (rx_handler(&skb)) {
3400 case RX_HANDLER_CONSUMED:
3402 case RX_HANDLER_ANOTHER:
3404 case RX_HANDLER_EXACT:
3405 deliver_exact = true;
3406 case RX_HANDLER_PASS:
3413 if (vlan_tx_nonzero_tag_present(skb))
3414 skb->pkt_type = PACKET_OTHERHOST;
3416 /* deliver only exact match when indicated */
3417 null_or_dev = deliver_exact ? skb->dev : NULL;
3419 type = skb->protocol;
3420 list_for_each_entry_rcu(ptype,
3421 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3422 if (ptype->type == type &&
3423 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3424 ptype->dev == orig_dev)) {
3426 ret = deliver_skb(skb, pt_prev, orig_dev);
3432 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3435 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3438 atomic_long_inc(&skb->dev->rx_dropped);
3440 /* Jamal, now you will not be able to escape explaining
3441 * to me how you were going to use this. :-)
3449 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3454 * netif_receive_skb - process receive buffer from network
3455 * @skb: buffer to process
3457 * netif_receive_skb() is the main receive data processing function.
3458 * It always succeeds. The buffer may be dropped during processing
3459 * for congestion control or by the protocol layers.
3461 * This function may only be called from softirq context and interrupts
3462 * should be enabled.
3464 * Return values (usually ignored):
3465 * NET_RX_SUCCESS: no congestion
3466 * NET_RX_DROP: packet was dropped
3468 int netif_receive_skb(struct sk_buff *skb)
3470 net_timestamp_check(netdev_tstamp_prequeue, skb);
3472 if (skb_defer_rx_timestamp(skb))
3473 return NET_RX_SUCCESS;
3476 if (static_key_false(&rps_needed)) {
3477 struct rps_dev_flow voidflow, *rflow = &voidflow;
3482 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3485 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3492 return __netif_receive_skb(skb);
3494 EXPORT_SYMBOL(netif_receive_skb);
3496 /* Network device is going away, flush any packets still pending
3497 * Called with irqs disabled.
3499 static void flush_backlog(void *arg)
3501 struct net_device *dev = arg;
3502 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3503 struct sk_buff *skb, *tmp;
3506 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3507 if (skb->dev == dev) {
3508 __skb_unlink(skb, &sd->input_pkt_queue);
3510 input_queue_head_incr(sd);
3515 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3516 if (skb->dev == dev) {
3517 __skb_unlink(skb, &sd->process_queue);
3519 input_queue_head_incr(sd);
3524 static int napi_gro_complete(struct sk_buff *skb)
3526 struct packet_offload *ptype;
3527 __be16 type = skb->protocol;
3528 struct list_head *head = &offload_base;
3531 if (NAPI_GRO_CB(skb)->count == 1) {
3532 skb_shinfo(skb)->gso_size = 0;
3537 list_for_each_entry_rcu(ptype, head, list) {
3538 if (ptype->type != type || !ptype->callbacks.gro_complete)
3541 err = ptype->callbacks.gro_complete(skb);
3547 WARN_ON(&ptype->list == head);
3549 return NET_RX_SUCCESS;
3553 return netif_receive_skb(skb);
3556 /* napi->gro_list contains packets ordered by age, with the
3557 * youngest packets at the head of it.
3558 * Complete skbs in reverse order to reduce latencies.
3560 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3562 struct sk_buff *skb, *prev = NULL;
3564 /* scan list and build reverse chain */
3565 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3570 for (skb = prev; skb; skb = prev) {
3573 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3577 napi_gro_complete(skb);
3581 napi->gro_list = NULL;
3583 EXPORT_SYMBOL(napi_gro_flush);
3585 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3587 struct sk_buff **pp = NULL;
3588 struct packet_offload *ptype;
3589 __be16 type = skb->protocol;
3590 struct list_head *head = &offload_base;
3593 enum gro_result ret;
3595 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3598 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3602 list_for_each_entry_rcu(ptype, head, list) {
3603 if (ptype->type != type || !ptype->callbacks.gro_receive)
3606 skb_set_network_header(skb, skb_gro_offset(skb));
3607 mac_len = skb->network_header - skb->mac_header;
3608 skb->mac_len = mac_len;
3609 NAPI_GRO_CB(skb)->same_flow = 0;
3610 NAPI_GRO_CB(skb)->flush = 0;
3611 NAPI_GRO_CB(skb)->free = 0;
3613 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
3618 if (&ptype->list == head)
3621 same_flow = NAPI_GRO_CB(skb)->same_flow;
3622 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3625 struct sk_buff *nskb = *pp;
3629 napi_gro_complete(nskb);
3636 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3640 NAPI_GRO_CB(skb)->count = 1;
3641 NAPI_GRO_CB(skb)->age = jiffies;
3642 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3643 skb->next = napi->gro_list;
3644 napi->gro_list = skb;
3648 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3649 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3651 BUG_ON(skb->end - skb->tail < grow);
3653 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3656 skb->data_len -= grow;
3658 skb_shinfo(skb)->frags[0].page_offset += grow;
3659 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3661 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3662 skb_frag_unref(skb, 0);
3663 memmove(skb_shinfo(skb)->frags,
3664 skb_shinfo(skb)->frags + 1,
3665 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3676 EXPORT_SYMBOL(dev_gro_receive);
3678 static inline gro_result_t
3679 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3682 unsigned int maclen = skb->dev->hard_header_len;
3684 for (p = napi->gro_list; p; p = p->next) {
3685 unsigned long diffs;
3687 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3688 diffs |= p->vlan_tci ^ skb->vlan_tci;
3689 if (maclen == ETH_HLEN)
3690 diffs |= compare_ether_header(skb_mac_header(p),
3691 skb_gro_mac_header(skb));
3693 diffs = memcmp(skb_mac_header(p),
3694 skb_gro_mac_header(skb),
3696 NAPI_GRO_CB(p)->same_flow = !diffs;
3697 NAPI_GRO_CB(p)->flush = 0;
3700 return dev_gro_receive(napi, skb);
3703 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3707 if (netif_receive_skb(skb))
3715 case GRO_MERGED_FREE:
3716 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3717 kmem_cache_free(skbuff_head_cache, skb);
3729 EXPORT_SYMBOL(napi_skb_finish);
3731 static void skb_gro_reset_offset(struct sk_buff *skb)
3733 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3734 const skb_frag_t *frag0 = &pinfo->frags[0];
3736 NAPI_GRO_CB(skb)->data_offset = 0;
3737 NAPI_GRO_CB(skb)->frag0 = NULL;
3738 NAPI_GRO_CB(skb)->frag0_len = 0;
3740 if (skb->mac_header == skb->tail &&
3742 !PageHighMem(skb_frag_page(frag0))) {
3743 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3744 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3748 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3750 skb_gro_reset_offset(skb);
3752 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3754 EXPORT_SYMBOL(napi_gro_receive);
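/*
 * Illustrative sketch, not part of the original file: handing one
 * received frame to GRO from a hypothetical driver's poll routine.
 * The driver is assumed to have already mapped or copied the frame
 * into @skb.
 */
static void example_rx_one(struct napi_struct *napi, struct net_device *dev,
			   struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	napi_gro_receive(napi, skb);
}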
3756 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3758 __skb_pull(skb, skb_headlen(skb));
3759 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3760 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3762 skb->dev = napi->dev;
3768 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3770 struct sk_buff *skb = napi->skb;
3773 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3779 EXPORT_SYMBOL(napi_get_frags);
3781 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3787 skb->protocol = eth_type_trans(skb, skb->dev);
3789 if (ret == GRO_HELD)
3790 skb_gro_pull(skb, -ETH_HLEN);
3791 else if (netif_receive_skb(skb))
3796 case GRO_MERGED_FREE:
3797 napi_reuse_skb(napi, skb);
3806 EXPORT_SYMBOL(napi_frags_finish);
3808 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3810 struct sk_buff *skb = napi->skb;
3817 skb_reset_mac_header(skb);
3818 skb_gro_reset_offset(skb);
3820 off = skb_gro_offset(skb);
3821 hlen = off + sizeof(*eth);
3822 eth = skb_gro_header_fast(skb, off);
3823 if (skb_gro_header_hard(skb, hlen)) {
3824 eth = skb_gro_header_slow(skb, hlen, off);
3825 if (unlikely(!eth)) {
3826 napi_reuse_skb(napi, skb);
3832 skb_gro_pull(skb, sizeof(*eth));
3835 * This works because the only protocols we care about don't require
3836 * special handling. We'll fix it up properly at the end.
3838 skb->protocol = eth->h_proto;
3844 gro_result_t napi_gro_frags(struct napi_struct *napi)
3846 struct sk_buff *skb = napi_frags_skb(napi);
3851 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3853 EXPORT_SYMBOL(napi_gro_frags);
3856 * net_rps_action sends any pending IPIs for RPS.
3857 * Note: called with local irq disabled, but exits with local irq enabled.
3859 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3862 struct softnet_data *remsd = sd->rps_ipi_list;
3865 sd->rps_ipi_list = NULL;
3869 /* Send pending IPI's to kick RPS processing on remote cpus. */
3871 struct softnet_data *next = remsd->rps_ipi_next;
3873 if (cpu_online(remsd->cpu))
3874 __smp_call_function_single(remsd->cpu,
3883 static int process_backlog(struct napi_struct *napi, int quota)
3886 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3889 /* Check if we have pending IPIs; it's better to send them now
3890 * rather than waiting for net_rx_action() to end.
3892 if (sd->rps_ipi_list) {
3893 local_irq_disable();
3894 net_rps_action_and_irq_enable(sd);
3897 napi->weight = weight_p;
3898 local_irq_disable();
3899 while (work < quota) {
3900 struct sk_buff *skb;
3903 while ((skb = __skb_dequeue(&sd->process_queue))) {
3905 __netif_receive_skb(skb);
3906 local_irq_disable();
3907 input_queue_head_incr(sd);
3908 if (++work >= quota) {
3915 qlen = skb_queue_len(&sd->input_pkt_queue);
3917 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3918 &sd->process_queue);
3920 if (qlen < quota - work) {
3922 * Inline a custom version of __napi_complete().
3923 * Only the current CPU owns and manipulates this napi,
3924 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
3925 * We can use a plain write instead of clear_bit(),
3926 * and we don't need an smp_mb() memory barrier.
3928 list_del(&napi->poll_list);
3931 quota = work + qlen;
3941 * __napi_schedule - schedule for receive
3942 * @n: entry to schedule
3944 * The entry's receive function will be scheduled to run
3946 void __napi_schedule(struct napi_struct *n)
3948 unsigned long flags;
3950 local_irq_save(flags);
3951 ____napi_schedule(&__get_cpu_var(softnet_data), n);
3952 local_irq_restore(flags);
3954 EXPORT_SYMBOL(__napi_schedule);
3956 void __napi_complete(struct napi_struct *n)
3958 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3959 BUG_ON(n->gro_list);
3961 list_del(&n->poll_list);
3962 smp_mb__before_clear_bit();
3963 clear_bit(NAPI_STATE_SCHED, &n->state);
3965 EXPORT_SYMBOL(__napi_complete);
3967 void napi_complete(struct napi_struct *n)
3969 unsigned long flags;
3972 * don't let napi dequeue from the CPU poll list
3973 * just in case it's running on a different CPU
3975 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3978 napi_gro_flush(n, false);
3979 local_irq_save(flags);
3981 local_irq_restore(flags);
3983 EXPORT_SYMBOL(napi_complete);
3985 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3986 int (*poll)(struct napi_struct *, int), int weight)
3988 INIT_LIST_HEAD(&napi->poll_list);
3989 napi->gro_count = 0;
3990 napi->gro_list = NULL;
3993 napi->weight = weight;
3994 list_add(&napi->dev_list, &dev->napi_list);
3996 #ifdef CONFIG_NETPOLL
3997 spin_lock_init(&napi->poll_lock);
3998 napi->poll_owner = -1;
4000 set_bit(NAPI_STATE_SCHED, &napi->state);
4002 EXPORT_SYMBOL(netif_napi_add);
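/*
 * Illustrative sketch, not part of the original file: the usual
 * driver-side pattern around netif_napi_add().  Everything prefixed
 * "example_" is hypothetical; a real poll callback would drain the
 * device RX ring and feed each skb to napi_gro_receive() or
 * netif_receive_skb().
 */
struct example_priv {
	struct napi_struct napi;
	struct net_device *dev;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to @budget RX descriptors here ... */

	if (work_done < budget) {
		napi_complete(napi);
		/* ... re-enable the device RX interrupt here ... */
	}
	return work_done;
}

static void example_napi_setup(struct example_priv *priv)
{
	netif_napi_add(priv->dev, &priv->napi, example_poll, 64);
	napi_enable(&priv->napi);
}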
4004 void netif_napi_del(struct napi_struct *napi)
4006 struct sk_buff *skb, *next;
4008 list_del_init(&napi->dev_list);
4009 napi_free_frags(napi);
4011 for (skb = napi->gro_list; skb; skb = next) {
4017 napi->gro_list = NULL;
4018 napi->gro_count = 0;
4020 EXPORT_SYMBOL(netif_napi_del);
4022 static void net_rx_action(struct softirq_action *h)
4024 struct softnet_data *sd = &__get_cpu_var(softnet_data);
4025 unsigned long time_limit = jiffies + 2;
4026 int budget = netdev_budget;
4029 local_irq_disable();
4031 while (!list_empty(&sd->poll_list)) {
4032 struct napi_struct *n;
4035 /* If the softirq window is exhausted then punt.
4036 * Allow this to run for 2 jiffies, which allows
4037 * an average latency of 1.5/HZ.
4039 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
4044 /* Even though interrupts have been re-enabled, this
4045 * access is safe because interrupts can only add new
4046 * entries to the tail of this list, and only ->poll()
4047 * calls can remove this head entry from the list.
4049 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
4051 have = netpoll_poll_lock(n);
4055 /* This NAPI_STATE_SCHED test is for avoiding a race
4056 * with netpoll's poll_napi(). Only the entity which
4057 * obtains the lock and sees NAPI_STATE_SCHED set will
4058 * actually make the ->poll() call. Therefore we avoid
4059 * accidentally calling ->poll() when NAPI is not scheduled.
4062 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4063 work = n->poll(n, weight);
4067 WARN_ON_ONCE(work > weight);
4071 local_irq_disable();
4073 /* Drivers must not modify the NAPI state if they
4074 * consume the entire weight. In such cases this code
4075 * still "owns" the NAPI instance and therefore can
4076 * move the instance around on the list at-will.
4078 if (unlikely(work == weight)) {
4079 if (unlikely(napi_disable_pending(n))) {
4082 local_irq_disable();
4085 /* Flush packets that are too old.
4086 * If HZ < 1000, flush all packets.
4089 napi_gro_flush(n, HZ >= 1000);
4090 local_irq_disable();
4092 list_move_tail(&n->poll_list, &sd->poll_list);
4096 netpoll_poll_unlock(have);
4099 net_rps_action_and_irq_enable(sd);
4101 #ifdef CONFIG_NET_DMA
4103 * There may not be any more sk_buffs coming right now, so push
4104 * any pending DMA copies to hardware
4106 dma_issue_pending_all();
4113 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4117 static gifconf_func_t *gifconf_list[NPROTO];
4120 * register_gifconf - register a SIOCGIF handler
4121 * @family: Address family
4122 * @gifconf: Function handler
4124 * Register protocol dependent address dumping routines. The handler
4125 * that is passed must not be freed or reused until it has been replaced
4126 * by another handler.
4128 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
4130 if (family >= NPROTO)
4132 gifconf_list[family] = gifconf;
4135 EXPORT_SYMBOL(register_gifconf);
4139 * Map an interface index to its name (SIOCGIFNAME)
4143 * We need this ioctl for efficient implementation of the
4144 * if_indextoname() function required by the IPv6 API. Without
4145 * it, we would have to search all the interfaces to find a match.
4149 static int dev_ifname(struct net *net, struct ifreq __user *arg)
4151 struct net_device *dev;
4155 * Fetch the caller's info block.
4158 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
4162 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
4168 strcpy(ifr.ifr_name, dev->name);
4171 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
4177 * Perform a SIOCGIFCONF call. This structure will change
4178 * size eventually, and there is nothing I can do about it.
4179 * Thus we will need a 'compatibility mode'.
4182 static int dev_ifconf(struct net *net, char __user *arg)
4185 struct net_device *dev;
4192 * Fetch the caller's info block.
4195 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4202 * Loop over the interfaces, and write an info block for each.
4206 for_each_netdev(net, dev) {
4207 for (i = 0; i < NPROTO; i++) {
4208 if (gifconf_list[i]) {
4211 done = gifconf_list[i](dev, NULL, 0);
4213 done = gifconf_list[i](dev, pos + total,
4223 * All done. Write the updated control block back to the caller.
4225 ifc.ifc_len = total;
4228 * Both BSD and Solaris return 0 here, so we do too.
4230 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4233 #ifdef CONFIG_PROC_FS
4235 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4237 #define get_bucket(x) ((x) >> BUCKET_SPACE)
4238 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4239 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4241 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4243 struct net *net = seq_file_net(seq);
4244 struct net_device *dev;
4245 struct hlist_node *p;
4246 struct hlist_head *h;
4247 unsigned int count = 0, offset = get_offset(*pos);
4249 h = &net->dev_name_head[get_bucket(*pos)];
4250 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4251 if (++count == offset)
4258 static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4260 struct net_device *dev;
4261 unsigned int bucket;
4264 dev = dev_from_same_bucket(seq, pos);
4268 bucket = get_bucket(*pos) + 1;
4269 *pos = set_bucket_offset(bucket, 1);
4270 } while (bucket < NETDEV_HASHENTRIES);
4276 * This is invoked by the /proc filesystem handler to display a device in detail.
4279 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4284 return SEQ_START_TOKEN;
4286 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4289 return dev_from_bucket(seq, pos);
4292 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4295 return dev_from_bucket(seq, pos);
4298 void dev_seq_stop(struct seq_file *seq, void *v)
4304 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4306 struct rtnl_link_stats64 temp;
4307 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4309 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4310 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4311 dev->name, stats->rx_bytes, stats->rx_packets,
4313 stats->rx_dropped + stats->rx_missed_errors,
4314 stats->rx_fifo_errors,
4315 stats->rx_length_errors + stats->rx_over_errors +
4316 stats->rx_crc_errors + stats->rx_frame_errors,
4317 stats->rx_compressed, stats->multicast,
4318 stats->tx_bytes, stats->tx_packets,
4319 stats->tx_errors, stats->tx_dropped,
4320 stats->tx_fifo_errors, stats->collisions,
4321 stats->tx_carrier_errors +
4322 stats->tx_aborted_errors +
4323 stats->tx_window_errors +
4324 stats->tx_heartbeat_errors,
4325 stats->tx_compressed);
4329 * Called from the PROCfs module. This now uses the new arbitrary sized
4330 * /proc/net interface to create /proc/net/dev
4332 static int dev_seq_show(struct seq_file *seq, void *v)
4334 if (v == SEQ_START_TOKEN)
4335 seq_puts(seq, "Inter-| Receive "
4337 " face |bytes packets errs drop fifo frame "
4338 "compressed multicast|bytes packets errs "
4339 "drop fifo colls carrier compressed\n");
4341 dev_seq_printf_stats(seq, v);
4345 static struct softnet_data *softnet_get_online(loff_t *pos)
4347 struct softnet_data *sd = NULL;
4349 while (*pos < nr_cpu_ids)
4350 if (cpu_online(*pos)) {
4351 sd = &per_cpu(softnet_data, *pos);
4358 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4360 return softnet_get_online(pos);
4363 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4366 return softnet_get_online(pos);
4369 static void softnet_seq_stop(struct seq_file *seq, void *v)
4373 static int softnet_seq_show(struct seq_file *seq, void *v)
4375 struct softnet_data *sd = v;
4377 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4378 sd->processed, sd->dropped, sd->time_squeeze, 0,
4379 0, 0, 0, 0, /* was fastroute */
4380 sd->cpu_collision, sd->received_rps);
4384 static const struct seq_operations dev_seq_ops = {
4385 .start = dev_seq_start,
4386 .next = dev_seq_next,
4387 .stop = dev_seq_stop,
4388 .show = dev_seq_show,
4391 static int dev_seq_open(struct inode *inode, struct file *file)
4393 return seq_open_net(inode, file, &dev_seq_ops,
4394 sizeof(struct seq_net_private));
4397 static const struct file_operations dev_seq_fops = {
4398 .owner = THIS_MODULE,
4399 .open = dev_seq_open,
4401 .llseek = seq_lseek,
4402 .release = seq_release_net,
4405 static const struct seq_operations softnet_seq_ops = {
4406 .start = softnet_seq_start,
4407 .next = softnet_seq_next,
4408 .stop = softnet_seq_stop,
4409 .show = softnet_seq_show,
4412 static int softnet_seq_open(struct inode *inode, struct file *file)
4414 return seq_open(file, &softnet_seq_ops);
4417 static const struct file_operations softnet_seq_fops = {
4418 .owner = THIS_MODULE,
4419 .open = softnet_seq_open,
4421 .llseek = seq_lseek,
4422 .release = seq_release,
4425 static void *ptype_get_idx(loff_t pos)
4427 struct packet_type *pt = NULL;
4431 list_for_each_entry_rcu(pt, &ptype_all, list) {
4437 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4438 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4447 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4451 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4454 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4456 struct packet_type *pt;
4457 struct list_head *nxt;
4461 if (v == SEQ_START_TOKEN)
4462 return ptype_get_idx(0);
4465 nxt = pt->list.next;
4466 if (pt->type == htons(ETH_P_ALL)) {
4467 if (nxt != &ptype_all)
4470 nxt = ptype_base[0].next;
4472 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4474 while (nxt == &ptype_base[hash]) {
4475 if (++hash >= PTYPE_HASH_SIZE)
4477 nxt = ptype_base[hash].next;
4480 return list_entry(nxt, struct packet_type, list);
4483 static void ptype_seq_stop(struct seq_file *seq, void *v)
4489 static int ptype_seq_show(struct seq_file *seq, void *v)
4491 struct packet_type *pt = v;
4493 if (v == SEQ_START_TOKEN)
4494 seq_puts(seq, "Type Device Function\n");
4495 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4496 if (pt->type == htons(ETH_P_ALL))
4497 seq_puts(seq, "ALL ");
4499 seq_printf(seq, "%04x", ntohs(pt->type));
4501 seq_printf(seq, " %-8s %pF\n",
4502 pt->dev ? pt->dev->name : "", pt->func);
4508 static const struct seq_operations ptype_seq_ops = {
4509 .start = ptype_seq_start,
4510 .next = ptype_seq_next,
4511 .stop = ptype_seq_stop,
4512 .show = ptype_seq_show,
4515 static int ptype_seq_open(struct inode *inode, struct file *file)
4517 return seq_open_net(inode, file, &ptype_seq_ops,
4518 sizeof(struct seq_net_private));
4521 static const struct file_operations ptype_seq_fops = {
4522 .owner = THIS_MODULE,
4523 .open = ptype_seq_open,
4525 .llseek = seq_lseek,
4526 .release = seq_release_net,
4530 static int __net_init dev_proc_net_init(struct net *net)
4534 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4536 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4538 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4541 if (wext_proc_init(net))
4547 proc_net_remove(net, "ptype");
4549 proc_net_remove(net, "softnet_stat");
4551 proc_net_remove(net, "dev");
4555 static void __net_exit dev_proc_net_exit(struct net *net)
4557 wext_proc_exit(net);
4559 proc_net_remove(net, "ptype");
4560 proc_net_remove(net, "softnet_stat");
4561 proc_net_remove(net, "dev");
4564 static struct pernet_operations __net_initdata dev_proc_ops = {
4565 .init = dev_proc_net_init,
4566 .exit = dev_proc_net_exit,
4569 static int __init dev_proc_init(void)
4571 return register_pernet_subsys(&dev_proc_ops);
4574 #define dev_proc_init() 0
4575 #endif /* CONFIG_PROC_FS */
4579 * netdev_set_master - set up master pointer
4580 * @slave: slave device
4581 * @master: new master device
4583 * Changes the master device of the slave. Pass %NULL to break the
4584 * bonding. The caller must hold the RTNL semaphore. On a failure
4585 * a negative errno code is returned. On success the reference counts
4586 * are adjusted and the function returns zero.
4588 int netdev_set_master(struct net_device *slave, struct net_device *master)
4590 struct net_device *old = slave->master;
4600 slave->master = master;
4606 EXPORT_SYMBOL(netdev_set_master);
4609 * netdev_set_bond_master - set up bonding master/slave pair
4610 * @slave: slave device
4611 * @master: new master device
4613 * Changes the master device of the slave. Pass %NULL to break the
4614 * bonding. The caller must hold the RTNL semaphore. On a failure
4615 * a negative errno code is returned. On success %RTM_NEWLINK is sent
4616 * to the routing socket and the function returns zero.
4618 int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4624 err = netdev_set_master(slave, master);
4628 slave->flags |= IFF_SLAVE;
4630 slave->flags &= ~IFF_SLAVE;
4632 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4635 EXPORT_SYMBOL(netdev_set_bond_master);
4637 static void dev_change_rx_flags(struct net_device *dev, int flags)
4639 const struct net_device_ops *ops = dev->netdev_ops;
4641 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4642 ops->ndo_change_rx_flags(dev, flags);
4645 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4647 unsigned int old_flags = dev->flags;
4653 dev->flags |= IFF_PROMISC;
4654 dev->promiscuity += inc;
4655 if (dev->promiscuity == 0) {
4658 * If inc causes overflow, leave promiscuity untouched and return an error.
4661 dev->flags &= ~IFF_PROMISC;
4663 dev->promiscuity -= inc;
4664 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4669 if (dev->flags != old_flags) {
4670 pr_info("device %s %s promiscuous mode\n",
4672 dev->flags & IFF_PROMISC ? "entered" : "left");
4673 if (audit_enabled) {
4674 current_uid_gid(&uid, &gid);
4675 audit_log(current->audit_context, GFP_ATOMIC,
4676 AUDIT_ANOM_PROMISCUOUS,
4677 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4678 dev->name, (dev->flags & IFF_PROMISC),
4679 (old_flags & IFF_PROMISC),
4680 from_kuid(&init_user_ns, audit_get_loginuid(current)),
4681 from_kuid(&init_user_ns, uid),
4682 from_kgid(&init_user_ns, gid),
4683 audit_get_sessionid(current));
4686 dev_change_rx_flags(dev, IFF_PROMISC);
4692 * dev_set_promiscuity - update promiscuity count on a device
4696 * Add or remove promiscuity from a device. While the count in the device
4697 * remains above zero the interface remains promiscuous. Once it hits zero
4698 * the device reverts back to normal filtering operation. A negative inc
4699 * value is used to drop promiscuity on the device.
4700 * Return 0 if successful or a negative errno code on error.
4702 int dev_set_promiscuity(struct net_device *dev, int inc)
4704 unsigned int old_flags = dev->flags;
4707 err = __dev_set_promiscuity(dev, inc);
4710 if (dev->flags != old_flags)
4711 dev_set_rx_mode(dev);
4714 EXPORT_SYMBOL(dev_set_promiscuity);
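/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * packet tap taking and releasing promiscuous mode.  The counter-based
 * interface lets several users stack requests; every +1 must eventually
 * be balanced by a -1.  RTNL is held around the calls.
 */
static int example_tap_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);
	rtnl_unlock();
	return err;
}

static void example_tap_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}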
4717 * dev_set_allmulti - update allmulti count on a device
4721 * Add or remove reception of all multicast frames to a device. While the
4722 * count in the device remains above zero the interface remains listening
4723 * to all multicast frames. Once it hits zero the device reverts back to normal
4724 * filtering operation. A negative @inc value is used to drop the counter
4725 * when releasing a resource needing all multicasts.
4726 * Return 0 if successful or a negative errno code on error.
4729 int dev_set_allmulti(struct net_device *dev, int inc)
4731 unsigned int old_flags = dev->flags;
4735 dev->flags |= IFF_ALLMULTI;
4736 dev->allmulti += inc;
4737 if (dev->allmulti == 0) {
4740 * If inc causes overflow, leave allmulti untouched and return an error.
4743 dev->flags &= ~IFF_ALLMULTI;
4745 dev->allmulti -= inc;
4746 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4751 if (dev->flags ^ old_flags) {
4752 dev_change_rx_flags(dev, IFF_ALLMULTI);
4753 dev_set_rx_mode(dev);
4757 EXPORT_SYMBOL(dev_set_allmulti);
4760 * Upload unicast and multicast address lists to device and
4761 * configure RX filtering. When the device doesn't support unicast
4762 * filtering it is put in promiscuous mode while unicast addresses are present.
4765 void __dev_set_rx_mode(struct net_device *dev)
4767 const struct net_device_ops *ops = dev->netdev_ops;
4769 /* dev_open will call this function so the list will stay sane. */
4770 if (!(dev->flags&IFF_UP))
4773 if (!netif_device_present(dev))
4776 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4777 /* Unicast address changes may only happen under the rtnl,
4778 * therefore calling __dev_set_promiscuity here is safe.
4780 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4781 __dev_set_promiscuity(dev, 1);
4782 dev->uc_promisc = true;
4783 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4784 __dev_set_promiscuity(dev, -1);
4785 dev->uc_promisc = false;
4789 if (ops->ndo_set_rx_mode)
4790 ops->ndo_set_rx_mode(dev);
4793 void dev_set_rx_mode(struct net_device *dev)
4795 netif_addr_lock_bh(dev);
4796 __dev_set_rx_mode(dev);
4797 netif_addr_unlock_bh(dev);
4801 * dev_get_flags - get flags reported to userspace
4804 * Get the combination of flag bits exported through APIs to userspace.
4806 unsigned int dev_get_flags(const struct net_device *dev)
4810 flags = (dev->flags & ~(IFF_PROMISC |
4815 (dev->gflags & (IFF_PROMISC |
4818 if (netif_running(dev)) {
4819 if (netif_oper_up(dev))
4820 flags |= IFF_RUNNING;
4821 if (netif_carrier_ok(dev))
4822 flags |= IFF_LOWER_UP;
4823 if (netif_dormant(dev))
4824 flags |= IFF_DORMANT;
4829 EXPORT_SYMBOL(dev_get_flags);
4831 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4833 unsigned int old_flags = dev->flags;
4839 * Set the flags on our device.
4842 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4843 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4845 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4849 * Load in the correct multicast list now that the flags have changed.
4852 if ((old_flags ^ flags) & IFF_MULTICAST)
4853 dev_change_rx_flags(dev, IFF_MULTICAST);
4855 dev_set_rx_mode(dev);
4858 * Have we downed the interface? We handle IFF_UP ourselves
4859 * according to user attempts to set it, rather than blindly setting it.
4864 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4865 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4868 dev_set_rx_mode(dev);
4871 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4872 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4874 dev->gflags ^= IFF_PROMISC;
4875 dev_set_promiscuity(dev, inc);
4878 /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4879 is important. Some (broken) drivers set IFF_PROMISC when
4880 IFF_ALLMULTI is requested, without asking us and without reporting it.
4882 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4883 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4885 dev->gflags ^= IFF_ALLMULTI;
4886 dev_set_allmulti(dev, inc);
4892 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4894 unsigned int changes = dev->flags ^ old_flags;
4896 if (changes & IFF_UP) {
4897 if (dev->flags & IFF_UP)
4898 call_netdevice_notifiers(NETDEV_UP, dev);
4900 call_netdevice_notifiers(NETDEV_DOWN, dev);
4903 if (dev->flags & IFF_UP &&
4904 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4905 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4909 * dev_change_flags - change device settings
4911 * @flags: device state flags
4913 * Change settings on a device based on the supplied state flags. The flags
4914 * are in the userspace-exported format.
4916 int dev_change_flags(struct net_device *dev, unsigned int flags)
4919 unsigned int changes, old_flags = dev->flags;
4921 ret = __dev_change_flags(dev, flags);
4925 changes = old_flags ^ dev->flags;
4927 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4929 __dev_notify_flags(dev, old_flags);
4932 EXPORT_SYMBOL(dev_change_flags);
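/*
 * Illustrative sketch, not part of the original file: bringing an
 * interface administratively up from kernel code.  "example_bring_up"
 * is hypothetical; flag changes must be made under the RTNL lock.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}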
4935 * dev_set_mtu - Change maximum transfer unit
4937 * @new_mtu: new transfer unit
4939 * Change the maximum transfer size of the network device.
4941 int dev_set_mtu(struct net_device *dev, int new_mtu)
4943 const struct net_device_ops *ops = dev->netdev_ops;
4946 if (new_mtu == dev->mtu)
4949 /* MTU must be positive. */
4953 if (!netif_device_present(dev))
4957 if (ops->ndo_change_mtu)
4958 err = ops->ndo_change_mtu(dev, new_mtu);
4962 if (!err && dev->flags & IFF_UP)
4963 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4966 EXPORT_SYMBOL(dev_set_mtu);
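/*
 * Illustrative sketch, not part of the original file: changing a device
 * MTU from kernel code, as the SIOCSIFMTU ioctl path does.  The value
 * 9000 is a hypothetical jumbo-frame MTU; the RTNL lock is held around
 * the call.
 */
static int example_set_jumbo_mtu(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	return err;
}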
4969 * dev_set_group - Change group this device belongs to
4971 * @new_group: group this device should belong to
4973 void dev_set_group(struct net_device *dev, int new_group)
4975 dev->group = new_group;
4977 EXPORT_SYMBOL(dev_set_group);
4980 * dev_set_mac_address - Change Media Access Control Address
4984 * Change the hardware (MAC) address of the device
4986 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4988 const struct net_device_ops *ops = dev->netdev_ops;
4991 if (!ops->ndo_set_mac_address)
4993 if (sa->sa_family != dev->type)
4995 if (!netif_device_present(dev))
4997 err = ops->ndo_set_mac_address(dev, sa);
4999 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5000 add_device_randomness(dev->dev_addr, dev->addr_len);
5003 EXPORT_SYMBOL(dev_set_mac_address);
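/*
 * Illustrative sketch, not part of the original file: building the sockaddr
 * that dev_set_mac_address() expects.  sa_family must equal dev->type
 * (ARPHRD_ETHER for ethernet devices) or the call fails.  The helper name is
 * hypothetical; RTNL is taken as the ioctl path above does.
 */
static int __maybe_unused example_set_mac(struct net_device *dev,
					  const u8 addr[ETH_ALEN])
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, addr, ETH_ALEN);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}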
5006 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
5008 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
5011 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
5017 case SIOCGIFFLAGS: /* Get interface flags */
5018 ifr->ifr_flags = (short) dev_get_flags(dev);
5021 case SIOCGIFMETRIC: /* Get the metric on the interface
5022 (currently unused) */
5023 ifr->ifr_metric = 0;
5026 case SIOCGIFMTU: /* Get the MTU of a device */
5027 ifr->ifr_mtu = dev->mtu;
5032 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
5034 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
5035 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5036 ifr->ifr_hwaddr.sa_family = dev->type;
5044 ifr->ifr_map.mem_start = dev->mem_start;
5045 ifr->ifr_map.mem_end = dev->mem_end;
5046 ifr->ifr_map.base_addr = dev->base_addr;
5047 ifr->ifr_map.irq = dev->irq;
5048 ifr->ifr_map.dma = dev->dma;
5049 ifr->ifr_map.port = dev->if_port;
5053 ifr->ifr_ifindex = dev->ifindex;
5057 ifr->ifr_qlen = dev->tx_queue_len;
5061 /* dev_ioctl() should ensure this case
5073 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
5075 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
5078 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
5079 const struct net_device_ops *ops;
5084 ops = dev->netdev_ops;
5087 case SIOCSIFFLAGS: /* Set interface flags */
5088 return dev_change_flags(dev, ifr->ifr_flags);
5090 case SIOCSIFMETRIC: /* Set the metric on the interface
5091 (currently unused) */
5094 case SIOCSIFMTU: /* Set the MTU of a device */
5095 return dev_set_mtu(dev, ifr->ifr_mtu);
5098 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
5100 case SIOCSIFHWBROADCAST:
5101 if (ifr->ifr_hwaddr.sa_family != dev->type)
5103 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
5104 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
5105 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5109 if (ops->ndo_set_config) {
5110 if (!netif_device_present(dev))
5112 return ops->ndo_set_config(dev, &ifr->ifr_map);
5117 if (!ops->ndo_set_rx_mode ||
5118 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5120 if (!netif_device_present(dev))
5122 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
5125 if (!ops->ndo_set_rx_mode ||
5126 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
5128 if (!netif_device_present(dev))
5130 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
5133 if (ifr->ifr_qlen < 0)
5135 dev->tx_queue_len = ifr->ifr_qlen;
5139 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
5140 return dev_change_name(dev, ifr->ifr_newname);
5143 err = net_hwtstamp_validate(ifr);
5149 * Unknown or private ioctl
5152 if ((cmd >= SIOCDEVPRIVATE &&
5153 cmd <= SIOCDEVPRIVATE + 15) ||
5154 cmd == SIOCBONDENSLAVE ||
5155 cmd == SIOCBONDRELEASE ||
5156 cmd == SIOCBONDSETHWADDR ||
5157 cmd == SIOCBONDSLAVEINFOQUERY ||
5158 cmd == SIOCBONDINFOQUERY ||
5159 cmd == SIOCBONDCHANGEACTIVE ||
5160 cmd == SIOCGMIIPHY ||
5161 cmd == SIOCGMIIREG ||
5162 cmd == SIOCSMIIREG ||
5163 cmd == SIOCBRADDIF ||
5164 cmd == SIOCBRDELIF ||
5165 cmd == SIOCSHWTSTAMP ||
5166 cmd == SIOCWANDEV) {
5168 if (ops->ndo_do_ioctl) {
5169 if (netif_device_present(dev))
5170 err = ops->ndo_do_ioctl(dev, ifr, cmd);
5182 * This function handles all "interface"-type I/O control requests. The actual
5183 * 'doing' part of this is dev_ifsioc above.
5187 * dev_ioctl - network device ioctl
5188 * @net: the applicable net namespace
5189 * @cmd: command to issue
5190 * @arg: pointer to a struct ifreq in user space
5192 * Issue ioctl functions to devices. This is normally called by the
5193 * user space syscall interfaces but can sometimes be useful for
5194 * other purposes. The return value is the return from the syscall if
5195 * positive or a negative errno code on error.
5198 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5204 /* One special case: SIOCGIFCONF takes an ifconf argument
5205 and requires a shared lock, because it sleeps writing
5209 if (cmd == SIOCGIFCONF) {
5211 ret = dev_ifconf(net, (char __user *) arg);
5215 if (cmd == SIOCGIFNAME)
5216 return dev_ifname(net, (struct ifreq __user *)arg);
5218 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5221 ifr.ifr_name[IFNAMSIZ-1] = 0;
5223 colon = strchr(ifr.ifr_name, ':');
5228 * See which interface the caller is talking about.
5233 * These ioctl calls:
5234 * - can be done by all.
5235 * - atomic and do not require locking.
5246 dev_load(net, ifr.ifr_name);
5248 ret = dev_ifsioc_locked(net, &ifr, cmd);
5253 if (copy_to_user(arg, &ifr,
5254 sizeof(struct ifreq)))
5260 dev_load(net, ifr.ifr_name);
5262 ret = dev_ethtool(net, &ifr);
5267 if (copy_to_user(arg, &ifr,
5268 sizeof(struct ifreq)))
5274 * These ioctl calls:
5275 * - require superuser power.
5276 * - require strict serialization.
5282 if (!capable(CAP_NET_ADMIN))
5284 dev_load(net, ifr.ifr_name);
5286 ret = dev_ifsioc(net, &ifr, cmd);
5291 if (copy_to_user(arg, &ifr,
5292 sizeof(struct ifreq)))
5298 * These ioctl calls:
5299 * - require superuser power.
5300 * - require strict serialization.
5301 * - do not return a value
5311 case SIOCSIFHWBROADCAST:
5314 case SIOCBONDENSLAVE:
5315 case SIOCBONDRELEASE:
5316 case SIOCBONDSETHWADDR:
5317 case SIOCBONDCHANGEACTIVE:
5321 if (!capable(CAP_NET_ADMIN))
5324 case SIOCBONDSLAVEINFOQUERY:
5325 case SIOCBONDINFOQUERY:
5326 dev_load(net, ifr.ifr_name);
5328 ret = dev_ifsioc(net, &ifr, cmd);
5333 /* Get the per device memory space. We can add this but
5334 * currently do not support it */
5336 /* Set the per device memory buffer space.
5337 * Not applicable in our case */
5342 * Unknown or private ioctl.
5345 if (cmd == SIOCWANDEV ||
5346 (cmd >= SIOCDEVPRIVATE &&
5347 cmd <= SIOCDEVPRIVATE + 15)) {
5348 dev_load(net, ifr.ifr_name);
5350 ret = dev_ifsioc(net, &ifr, cmd);
5352 if (!ret && copy_to_user(arg, &ifr,
5353 sizeof(struct ifreq)))
5357 /* Take care of Wireless Extensions */
5358 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5359 return wext_handle_ioctl(net, &ifr, cmd, arg);
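/*
 * Illustrative sketch, not part of the original file: the userspace side of
 * one of the simple SIOCxIFxxx requests handled above (SIOCGIFMTU ends up in
 * dev_ifsioc_locked()).  Shown inside a comment because it is userspace code:
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *
 *	int main(void)
 *	{
 *		struct ifreq ifr;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *		if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
 *			printf("%s mtu %d\n", ifr.ifr_name, ifr.ifr_mtu);
 *		close(fd);
 *		return 0;
 *	}
 */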
5366 * dev_new_index - allocate an ifindex
5367 * @net: the applicable net namespace
5369 * Returns a suitable unique value for a new device interface
5370 * number. The caller must hold the rtnl semaphore or the
5371 * dev_base_lock to be sure it remains unique.
5373 static int dev_new_index(struct net *net)
5375 int ifindex = net->ifindex;
5379 if (!__dev_get_by_index(net, ifindex))
5380 return net->ifindex = ifindex;
5384 /* Delayed registration/unregistration */
5385 static LIST_HEAD(net_todo_list);
5387 static void net_set_todo(struct net_device *dev)
5389 list_add_tail(&dev->todo_list, &net_todo_list);
5392 static void rollback_registered_many(struct list_head *head)
5394 struct net_device *dev, *tmp;
5396 BUG_ON(dev_boot_phase);
5399 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5400 /* Some devices call this without ever having registered,
5401 * as part of initialization unwind. Remove those
5402 * devices and proceed with the remaining ones.
5404 if (dev->reg_state == NETREG_UNINITIALIZED) {
5405 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5409 list_del(&dev->unreg_list);
5412 dev->dismantle = true;
5413 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5416 /* If device is running, close it first. */
5417 dev_close_many(head);
5419 list_for_each_entry(dev, head, unreg_list) {
5420 /* And unlink it from device chain. */
5421 unlist_netdevice(dev);
5423 dev->reg_state = NETREG_UNREGISTERING;
5428 list_for_each_entry(dev, head, unreg_list) {
5429 /* Shutdown queueing discipline. */
5433 /* Notify protocols that we are about to destroy
5434 this device. They should clean up all of their state.
5436 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5438 if (!dev->rtnl_link_ops ||
5439 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5440 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5443 * Flush the unicast and multicast chains
5448 if (dev->netdev_ops->ndo_uninit)
5449 dev->netdev_ops->ndo_uninit(dev);
5451 /* Notifier chain MUST detach us from master device. */
5452 WARN_ON(dev->master);
5454 /* Remove entries from kobject tree */
5455 netdev_unregister_kobject(dev);
5460 list_for_each_entry(dev, head, unreg_list)
5464 static void rollback_registered(struct net_device *dev)
5468 list_add(&dev->unreg_list, &single);
5469 rollback_registered_many(&single);
5473 static netdev_features_t netdev_fix_features(struct net_device *dev,
5474 netdev_features_t features)
5476 /* Fix illegal checksum combinations */
5477 if ((features & NETIF_F_HW_CSUM) &&
5478 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5479 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5480 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5483 /* Fix illegal SG+CSUM combinations. */
5484 if ((features & NETIF_F_SG) &&
5485 !(features & NETIF_F_ALL_CSUM)) {
5487 "Dropping NETIF_F_SG since no checksum feature.\n");
5488 features &= ~NETIF_F_SG;
5491 /* TSO requires that SG is present as well. */
5492 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5493 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5494 features &= ~NETIF_F_ALL_TSO;
5497 /* TSO ECN requires that TSO is present as well. */
5498 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5499 features &= ~NETIF_F_TSO_ECN;
5501 /* Software GSO depends on SG. */
5502 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5503 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5504 features &= ~NETIF_F_GSO;
5507 /* UFO needs SG and checksumming */
5508 if (features & NETIF_F_UFO) {
5509 /* maybe split UFO into V4 and V6? */
5510 if (!((features & NETIF_F_GEN_CSUM) ||
5511 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5512 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5514 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5515 features &= ~NETIF_F_UFO;
5518 if (!(features & NETIF_F_SG)) {
5520 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5521 features &= ~NETIF_F_UFO;
5528 int __netdev_update_features(struct net_device *dev)
5530 netdev_features_t features;
5535 features = netdev_get_wanted_features(dev);
5537 if (dev->netdev_ops->ndo_fix_features)
5538 features = dev->netdev_ops->ndo_fix_features(dev, features);
5540 /* driver might be less strict about feature dependencies */
5541 features = netdev_fix_features(dev, features);
5543 if (dev->features == features)
5546 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5547 &dev->features, &features);
5549 if (dev->netdev_ops->ndo_set_features)
5550 err = dev->netdev_ops->ndo_set_features(dev, features);
5552 if (unlikely(err < 0)) {
5554 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5555 err, &features, &dev->features);
5560 dev->features = features;
5566 * netdev_update_features - recalculate device features
5567 * @dev: the device to check
5569 * Recalculate the dev->features set and send notifications if it
5570 * has changed. Should be called whenever driver- or hardware-dependent
5571 * conditions that influence the features may have changed.
5573 void netdev_update_features(struct net_device *dev)
5575 if (__netdev_update_features(dev))
5576 netdev_features_change(dev);
5578 EXPORT_SYMBOL(netdev_update_features);
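/*
 * Illustrative sketch, not part of the original file: a driver asking the
 * core to re-run the feature fixups after a condition that its
 * ndo_fix_features() callback depends on (link speed, firmware setting, ...)
 * has changed.  __netdev_update_features() asserts RTNL, so it is taken here.
 */
static void __maybe_unused example_link_config_changed(struct net_device *dev)
{
	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();
}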
5581 * netdev_change_features - recalculate device features
5582 * @dev: the device to check
5584 * Recalculate dev->features set and send notifications even
5585 * if they have not changed. Should be called instead of
5586 * netdev_update_features() if dev->vlan_features might also
5587 * have changed, to allow the changes to be propagated to stacked
5590 void netdev_change_features(struct net_device *dev)
5592 __netdev_update_features(dev);
5593 netdev_features_change(dev);
5595 EXPORT_SYMBOL(netdev_change_features);
5598 * netif_stacked_transfer_operstate - transfer operstate
5599 * @rootdev: the root or lower level device to transfer state from
5600 * @dev: the device to transfer operstate to
5602 * Transfer operational state from root to device. This is normally
5603 * called when a stacking relationship exists between the root
5604 * device and the device (a leaf device).
5606 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5607 struct net_device *dev)
5609 if (rootdev->operstate == IF_OPER_DORMANT)
5610 netif_dormant_on(dev);
5612 netif_dormant_off(dev);
5614 if (netif_carrier_ok(rootdev)) {
5615 if (!netif_carrier_ok(dev))
5616 netif_carrier_on(dev);
5618 if (netif_carrier_ok(dev))
5619 netif_carrier_off(dev);
5622 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5625 static int netif_alloc_rx_queues(struct net_device *dev)
5627 unsigned int i, count = dev->num_rx_queues;
5628 struct netdev_rx_queue *rx;
5632 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5634 pr_err("netdev: Unable to allocate %u rx queues\n", count);
5639 for (i = 0; i < count; i++)
5645 static void netdev_init_one_queue(struct net_device *dev,
5646 struct netdev_queue *queue, void *_unused)
5648 /* Initialize queue lock */
5649 spin_lock_init(&queue->_xmit_lock);
5650 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5651 queue->xmit_lock_owner = -1;
5652 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5655 dql_init(&queue->dql, HZ);
5659 static int netif_alloc_netdev_queues(struct net_device *dev)
5661 unsigned int count = dev->num_tx_queues;
5662 struct netdev_queue *tx;
5666 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5668 pr_err("netdev: Unable to allocate %u tx queues\n", count);
5673 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5674 spin_lock_init(&dev->tx_global_lock);
5680 * register_netdevice - register a network device
5681 * @dev: device to register
5683 * Take a completed network device structure and add it to the kernel
5684 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5685 * chain. 0 is returned on success. A negative errno code is returned
5686 * on a failure to set up the device, or if the name is a duplicate.
5688 * Callers must hold the rtnl semaphore. You may want
5689 * register_netdev() instead of this.
5692 * The locking appears insufficient to guarantee two parallel registers
5693 * will not get the same name.
5696 int register_netdevice(struct net_device *dev)
5699 struct net *net = dev_net(dev);
5701 BUG_ON(dev_boot_phase);
5706 /* When net_device's are persistent, this will be fatal. */
5707 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5710 spin_lock_init(&dev->addr_list_lock);
5711 netdev_set_addr_lockdep_class(dev);
5715 ret = dev_get_valid_name(net, dev, dev->name);
5719 /* Init, if this function is available */
5720 if (dev->netdev_ops->ndo_init) {
5721 ret = dev->netdev_ops->ndo_init(dev);
5731 dev->ifindex = dev_new_index(net);
5732 else if (__dev_get_by_index(net, dev->ifindex))
5735 if (dev->iflink == -1)
5736 dev->iflink = dev->ifindex;
5738 /* Transfer changeable features to wanted_features and enable
5739 * software offloads (GSO and GRO).
5741 dev->hw_features |= NETIF_F_SOFT_FEATURES;
5742 dev->features |= NETIF_F_SOFT_FEATURES;
5743 dev->wanted_features = dev->features & dev->hw_features;
5745 /* Turn on no cache copy if HW is doing checksum */
5746 if (!(dev->flags & IFF_LOOPBACK)) {
5747 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5748 if (dev->features & NETIF_F_ALL_CSUM) {
5749 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5750 dev->features |= NETIF_F_NOCACHE_COPY;
5754 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5756 dev->vlan_features |= NETIF_F_HIGHDMA;
5758 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5759 ret = notifier_to_errno(ret);
5763 ret = netdev_register_kobject(dev);
5766 dev->reg_state = NETREG_REGISTERED;
5768 __netdev_update_features(dev);
5771 * Default initial state at registration is that the
5772 * device is present.
5775 set_bit(__LINK_STATE_PRESENT, &dev->state);
5777 linkwatch_init_dev(dev);
5779 dev_init_scheduler(dev);
5781 list_netdevice(dev);
5782 add_device_randomness(dev->dev_addr, dev->addr_len);
5784 /* Notify protocols, that a new device appeared. */
5785 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5786 ret = notifier_to_errno(ret);
5788 rollback_registered(dev);
5789 dev->reg_state = NETREG_UNREGISTERED;
5792 * Prevent userspace races by waiting until the network
5793 * device is fully set up before sending notifications.
5795 if (!dev->rtnl_link_ops ||
5796 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5797 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5803 if (dev->netdev_ops->ndo_uninit)
5804 dev->netdev_ops->ndo_uninit(dev);
5807 EXPORT_SYMBOL(register_netdevice);
5810 * init_dummy_netdev - init a dummy network device for NAPI
5811 * @dev: device to init
5813 * This takes a network device structure and initializes the minimum
5814 * number of fields so it can be used to schedule NAPI polls without
5815 * registering a full-blown interface. This is to be used by drivers
5816 * that need to tie several hardware interfaces to a single NAPI
5817 * poll scheduler due to HW limitations.
5819 int init_dummy_netdev(struct net_device *dev)
5821 /* Clear everything. Note we don't initialize spinlocks
5822 * as they aren't supposed to be taken by any of the
5823 * NAPI code, and this dummy netdev is supposed to be
5824 * used only for NAPI polls
5826 memset(dev, 0, sizeof(struct net_device));
5828 /* make sure we BUG if trying to hit standard
5829 * register/unregister code path
5831 dev->reg_state = NETREG_DUMMY;
5833 /* NAPI wants this */
5834 INIT_LIST_HEAD(&dev->napi_list);
5836 /* a dummy interface is started by default */
5837 set_bit(__LINK_STATE_PRESENT, &dev->state);
5838 set_bit(__LINK_STATE_START, &dev->state);
5840 /* Note: We don't allocate pcpu_refcnt for dummy devices,
5841 * because users of this 'device' don't need to change
5847 EXPORT_SYMBOL_GPL(init_dummy_netdev);
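/*
 * Illustrative sketch, not part of the original file: the use case described
 * above, where one piece of hardware needs a NAPI context that is not tied
 * to any registered interface.  example_poll() and the weight of 64 are
 * hypothetical driver choices.
 */
static int example_poll(struct napi_struct *napi, int budget)
{
	/* a real driver would process up to @budget packets here */
	napi_complete(napi);
	return 0;
}

static struct net_device example_dummy_dev;
static struct napi_struct example_napi;

static void __maybe_unused example_napi_setup(void)
{
	init_dummy_netdev(&example_dummy_dev);
	netif_napi_add(&example_dummy_dev, &example_napi, example_poll, 64);
	napi_enable(&example_napi);
}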
5851 * register_netdev - register a network device
5852 * @dev: device to register
5854 * Take a completed network device structure and add it to the kernel
5855 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5856 * chain. 0 is returned on success. A negative errno code is returned
5857 * on a failure to set up the device, or if the name is a duplicate.
5859 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5860 * and expands the device name if you passed a format string to
5863 int register_netdev(struct net_device *dev)
5868 err = register_netdevice(dev);
5872 EXPORT_SYMBOL(register_netdev);
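/*
 * Illustrative sketch, not part of the original file: the usual probe
 * sequence a driver builds on top of register_netdev().  alloc_etherdev()
 * fills in ethernet defaults; a real driver would also set dev->netdev_ops,
 * the MAC address and feature bits before registering.
 */
static int __maybe_unused example_probe(struct net_device **out)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(0);	/* no private data in this sketch */
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);	/* never registered, so only free it */
		return err;
	}
	*out = dev;
	return 0;
}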
5874 int netdev_refcnt_read(const struct net_device *dev)
5878 for_each_possible_cpu(i)
5879 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5882 EXPORT_SYMBOL(netdev_refcnt_read);
5885 * netdev_wait_allrefs - wait until all references are gone.
5886 * @dev: target net_device
5888 * This is called when unregistering network devices.
5890 * Any protocol or device that holds a reference should register
5891 * for netdevice notification, and clean up and put back the
5892 * reference if they receive an UNREGISTER event.
5893 * We can get stuck here if buggy protocols don't correctly
5896 static void netdev_wait_allrefs(struct net_device *dev)
5898 unsigned long rebroadcast_time, warning_time;
5901 linkwatch_forget_dev(dev);
5903 rebroadcast_time = warning_time = jiffies;
5904 refcnt = netdev_refcnt_read(dev);
5906 while (refcnt != 0) {
5907 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5910 /* Rebroadcast unregister notification */
5911 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5917 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5918 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5920 /* We must not have linkwatch events
5921 * pending on unregister. If this
5922 * happens, we simply run the queue
5923 * unscheduled, resulting in a noop
5926 linkwatch_run_queue();
5931 rebroadcast_time = jiffies;
5936 refcnt = netdev_refcnt_read(dev);
5938 if (time_after(jiffies, warning_time + 10 * HZ)) {
5939 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5941 warning_time = jiffies;
5950 * register_netdevice(x1);
5951 * register_netdevice(x2);
5953 * unregister_netdevice(y1);
5954 * unregister_netdevice(y2);
5960 * We are invoked by rtnl_unlock().
5961 * This allows us to deal with problems:
5962 * 1) We can delete sysfs objects which invoke hotplug
5963 * without deadlocking with linkwatch via keventd.
5964 * 2) Since we run with the RTNL semaphore not held, we can sleep
5965 * safely in order to wait for the netdev refcnt to drop to zero.
5967 * We must not return until all unregister events added during
5968 * the interval the lock was held have been completed.
5970 void netdev_run_todo(void)
5972 struct list_head list;
5974 /* Snapshot list, allow later requests */
5975 list_replace_init(&net_todo_list, &list);
5980 /* Wait for rcu callbacks to finish before next phase */
5981 if (!list_empty(&list))
5984 while (!list_empty(&list)) {
5985 struct net_device *dev
5986 = list_first_entry(&list, struct net_device, todo_list);
5987 list_del(&dev->todo_list);
5990 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
5993 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5994 pr_err("network todo '%s' but state %d\n",
5995 dev->name, dev->reg_state);
6000 dev->reg_state = NETREG_UNREGISTERED;
6002 on_each_cpu(flush_backlog, dev, 1);
6004 netdev_wait_allrefs(dev);
6007 BUG_ON(netdev_refcnt_read(dev));
6008 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6009 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6010 WARN_ON(dev->dn_ptr);
6012 if (dev->destructor)
6013 dev->destructor(dev);
6015 /* Free network device */
6016 kobject_put(&dev->dev.kobj);
6020 /* Convert net_device_stats to rtnl_link_stats64. They have the same
6021 * fields in the same order, with only the type differing.
6023 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6024 const struct net_device_stats *netdev_stats)
6026 #if BITS_PER_LONG == 64
6027 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6028 memcpy(stats64, netdev_stats, sizeof(*stats64));
6030 size_t i, n = sizeof(*stats64) / sizeof(u64);
6031 const unsigned long *src = (const unsigned long *)netdev_stats;
6032 u64 *dst = (u64 *)stats64;
6034 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6035 sizeof(*stats64) / sizeof(u64));
6036 for (i = 0; i < n; i++)
6040 EXPORT_SYMBOL(netdev_stats_to_stats64);
6043 * dev_get_stats - get network device statistics
6044 * @dev: device to get statistics from
6045 * @storage: place to store stats
6047 * Get network statistics from device. Return @storage.
6048 * The device driver may provide its own method by setting
6049 * dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
6050 * otherwise the internal statistics structure is used.
6052 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6053 struct rtnl_link_stats64 *storage)
6055 const struct net_device_ops *ops = dev->netdev_ops;
6057 if (ops->ndo_get_stats64) {
6058 memset(storage, 0, sizeof(*storage));
6059 ops->ndo_get_stats64(dev, storage);
6060 } else if (ops->ndo_get_stats) {
6061 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6063 netdev_stats_to_stats64(storage, &dev->stats);
6065 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6068 EXPORT_SYMBOL(dev_get_stats);
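/*
 * Illustrative sketch, not part of the original file: reading the 64-bit
 * counters of a device.  The storage is caller-provided, so this works from
 * any context that holds a valid reference on @dev.
 */
static void __maybe_unused example_log_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	netdev_info(dev, "rx %llu / tx %llu packets\n",
		    (unsigned long long)stats.rx_packets,
		    (unsigned long long)stats.tx_packets);
}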
6070 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6072 struct netdev_queue *queue = dev_ingress_queue(dev);
6074 #ifdef CONFIG_NET_CLS_ACT
6077 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6080 netdev_init_one_queue(dev, queue, NULL);
6081 queue->qdisc = &noop_qdisc;
6082 queue->qdisc_sleeping = &noop_qdisc;
6083 rcu_assign_pointer(dev->ingress_queue, queue);
6088 static const struct ethtool_ops default_ethtool_ops;
6091 * alloc_netdev_mqs - allocate network device
6092 * @sizeof_priv: size of private data to allocate space for
6093 * @name: device name format string
6094 * @setup: callback to initialize device
6095 * @txqs: the number of TX subqueues to allocate
6096 * @rxqs: the number of RX subqueues to allocate
6098 * Allocates a struct net_device with private data area for driver use
6099 * and performs basic initialization. Also allocates subqueue structs
6100 * for each queue on the device.
6102 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6103 void (*setup)(struct net_device *),
6104 unsigned int txqs, unsigned int rxqs)
6106 struct net_device *dev;
6108 struct net_device *p;
6110 BUG_ON(strlen(name) >= sizeof(dev->name));
6113 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6119 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6124 alloc_size = sizeof(struct net_device);
6126 /* ensure 32-byte alignment of private area */
6127 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6128 alloc_size += sizeof_priv;
6130 /* ensure 32-byte alignment of whole construct */
6131 alloc_size += NETDEV_ALIGN - 1;
6133 p = kzalloc(alloc_size, GFP_KERNEL);
6135 pr_err("alloc_netdev: Unable to allocate device\n");
6139 dev = PTR_ALIGN(p, NETDEV_ALIGN);
6140 dev->padded = (char *)dev - (char *)p;
6142 dev->pcpu_refcnt = alloc_percpu(int);
6143 if (!dev->pcpu_refcnt)
6146 if (dev_addr_init(dev))
6152 dev_net_set(dev, &init_net);
6154 dev->gso_max_size = GSO_MAX_SIZE;
6155 dev->gso_max_segs = GSO_MAX_SEGS;
6157 INIT_LIST_HEAD(&dev->napi_list);
6158 INIT_LIST_HEAD(&dev->unreg_list);
6159 INIT_LIST_HEAD(&dev->link_watch_list);
6160 dev->priv_flags = IFF_XMIT_DST_RELEASE;
6163 dev->num_tx_queues = txqs;
6164 dev->real_num_tx_queues = txqs;
6165 if (netif_alloc_netdev_queues(dev))
6169 dev->num_rx_queues = rxqs;
6170 dev->real_num_rx_queues = rxqs;
6171 if (netif_alloc_rx_queues(dev))
6175 strcpy(dev->name, name);
6176 dev->group = INIT_NETDEV_GROUP;
6177 if (!dev->ethtool_ops)
6178 dev->ethtool_ops = &default_ethtool_ops;
6186 free_percpu(dev->pcpu_refcnt);
6196 EXPORT_SYMBOL(alloc_netdev_mqs);
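/*
 * Illustrative sketch, not part of the original file: allocating a
 * multi-queue ethernet-like device with driver private data.  struct
 * example_priv and example_setup() are hypothetical; the private area is
 * later reached with netdev_priv().
 */
struct example_priv {
	int example_state;
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);	/* type, MTU, broadcast address, ... */
}

static struct net_device * __maybe_unused example_alloc(void)
{
	/* "example%d" is expanded to a free name at registration time */
	return alloc_netdev_mqs(sizeof(struct example_priv), "example%d",
				example_setup, 4, 4);
}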
6199 * free_netdev - free network device
6202 * This function does the last stage of destroying an allocated device
6203 * interface. The reference to the device object is released.
6204 * If this is the last reference then it will be freed.
6206 void free_netdev(struct net_device *dev)
6208 struct napi_struct *p, *n;
6210 release_net(dev_net(dev));
6217 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6219 /* Flush device addresses */
6220 dev_addr_flush(dev);
6222 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6225 free_percpu(dev->pcpu_refcnt);
6226 dev->pcpu_refcnt = NULL;
6228 /* Compatibility with error handling in drivers */
6229 if (dev->reg_state == NETREG_UNINITIALIZED) {
6230 kfree((char *)dev - dev->padded);
6234 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6235 dev->reg_state = NETREG_RELEASED;
6237 /* will free via device release */
6238 put_device(&dev->dev);
6240 EXPORT_SYMBOL(free_netdev);
6243 * synchronize_net - Synchronize with packet receive processing
6245 * Wait for packets currently being received to be done.
6246 * Does not block later packets from starting.
6248 void synchronize_net(void)
6251 if (rtnl_is_locked())
6252 synchronize_rcu_expedited();
6256 EXPORT_SYMBOL(synchronize_net);
6259 * unregister_netdevice_queue - remove device from the kernel
6263 * This function shuts down a device interface and removes it
6264 * from the kernel tables.
6265 * If @head is not NULL, the device is queued to be unregistered later.
6267 * Callers must hold the rtnl semaphore. You may want
6268 * unregister_netdev() instead of this.
6271 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6276 list_move_tail(&dev->unreg_list, head);
6278 rollback_registered(dev);
6279 /* Finish processing unregister after unlock */
6283 EXPORT_SYMBOL(unregister_netdevice_queue);
6286 * unregister_netdevice_many - unregister many devices
6287 * @head: list of devices
6289 void unregister_netdevice_many(struct list_head *head)
6291 struct net_device *dev;
6293 if (!list_empty(head)) {
6294 rollback_registered_many(head);
6295 list_for_each_entry(dev, head, unreg_list)
6299 EXPORT_SYMBOL(unregister_netdevice_many);
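/*
 * Illustrative sketch, not part of the original file: batching the teardown
 * of several devices so they share one NETDEV_UNREGISTER pass and one RCU
 * grace period.  The example_devs[] array is hypothetical.
 */
static void __maybe_unused example_destroy_all(struct net_device **example_devs,
					       int count)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < count; i++)
		unregister_netdevice_queue(example_devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}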
6302 * unregister_netdev - remove device from the kernel
6305 * This function shuts down a device interface and removes it
6306 * from the kernel tables.
6308 * This is just a wrapper for unregister_netdevice that takes
6309 * the rtnl semaphore. In general you want to use this and not
6310 * unregister_netdevice.
6312 void unregister_netdev(struct net_device *dev)
6315 unregister_netdevice(dev);
6318 EXPORT_SYMBOL(unregister_netdev);
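/*
 * Illustrative sketch, not part of the original file: the matching removal
 * path for a driver that used register_netdev().  unregister_netdev() takes
 * and releases RTNL itself, and free_netdev() must only run afterwards.
 */
static void __maybe_unused example_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}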
6321 * dev_change_net_namespace - move device to a different network namespace
6323 * @net: network namespace
6324 * @pat: If not NULL name pattern to try if the current device name
6325 * is already taken in the destination network namespace.
6327 * This function shuts down a device interface and moves it
6328 * to a new network namespace. On success 0 is returned, on
6329 * a failure a negative errno code is returned.
6331 * Callers must hold the rtnl semaphore.
6334 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6340 /* Don't allow namespace local devices to be moved. */
6342 if (dev->features & NETIF_F_NETNS_LOCAL)
6345 /* Ensure the device has been registered */
6346 if (dev->reg_state != NETREG_REGISTERED)
6349 /* Get out if there is nothing to do */
6351 if (net_eq(dev_net(dev), net))
6354 /* Pick the destination device name, and ensure
6355 * we can use it in the destination network namespace.
6358 if (__dev_get_by_name(net, dev->name)) {
6359 /* We get here if we can't use the current device name */
6362 if (dev_get_valid_name(net, dev, pat) < 0)
6367 * And now a mini version of register_netdevice and unregister_netdevice.
6370 /* If device is running close it first. */
6373 /* And unlink it from device chain */
6375 unlist_netdevice(dev);
6379 /* Shutdown queueing discipline. */
6382 /* Notify protocols that we are about to destroy
6383 this device. They should clean up all of their state.
6385 Note that dev->reg_state stays at NETREG_REGISTERED.
6386 This is intentional, so that 8021q and macvlan know
6387 the device is just moving and can keep their slaves up.
6389 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6391 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6392 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6395 * Flush the unicast and multicast chains
6400 /* Actually switch the network namespace */
6401 dev_net_set(dev, net);
6403 /* If there is an ifindex conflict assign a new one */
6404 if (__dev_get_by_index(net, dev->ifindex)) {
6405 int iflink = (dev->iflink == dev->ifindex);
6406 dev->ifindex = dev_new_index(net);
6408 dev->iflink = dev->ifindex;
6411 /* Fixup kobjects */
6412 err = device_rename(&dev->dev, dev->name);
6415 /* Add the device back in the hashes */
6416 list_netdevice(dev);
6418 /* Notify protocols, that a new device appeared. */
6419 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6422 * Prevent userspace races by waiting until the network
6423 * device is fully setup before sending notifications.
6425 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6432 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
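/*
 * Illustrative sketch, not part of the original file: moving a device into
 * another namespace under RTNL, which is what RTM_SETLINK with
 * IFLA_NET_NS_PID/IFLA_NET_NS_FD ultimately does.  @target is assumed to be
 * held by the caller; "moved%d" is only used if the current name clashes.
 */
static int __maybe_unused example_move_dev(struct net_device *dev,
					   struct net *target)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target, "moved%d");
	rtnl_unlock();
	return err;
}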
6434 static int dev_cpu_callback(struct notifier_block *nfb,
6435 unsigned long action,
6438 struct sk_buff **list_skb;
6439 struct sk_buff *skb;
6440 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6441 struct softnet_data *sd, *oldsd;
6443 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6446 local_irq_disable();
6447 cpu = smp_processor_id();
6448 sd = &per_cpu(softnet_data, cpu);
6449 oldsd = &per_cpu(softnet_data, oldcpu);
6451 /* Find end of our completion_queue. */
6452 list_skb = &sd->completion_queue;
6454 list_skb = &(*list_skb)->next;
6455 /* Append completion queue from offline CPU. */
6456 *list_skb = oldsd->completion_queue;
6457 oldsd->completion_queue = NULL;
6459 /* Append output queue from offline CPU. */
6460 if (oldsd->output_queue) {
6461 *sd->output_queue_tailp = oldsd->output_queue;
6462 sd->output_queue_tailp = oldsd->output_queue_tailp;
6463 oldsd->output_queue = NULL;
6464 oldsd->output_queue_tailp = &oldsd->output_queue;
6466 /* Append NAPI poll list from offline CPU. */
6467 if (!list_empty(&oldsd->poll_list)) {
6468 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6469 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6472 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6475 /* Process offline CPU's input_pkt_queue */
6476 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6478 input_queue_head_incr(oldsd);
6480 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6482 input_queue_head_incr(oldsd);
6490 * netdev_increment_features - increment feature set by one
6491 * @all: current feature set
6492 * @one: new feature set
6493 * @mask: mask feature set
6495 * Computes a new feature set after adding a device with feature set
6496 * @one to the master device with current feature set @all. Will not
6497 * enable anything that is off in @mask. Returns the new feature set.
6499 netdev_features_t netdev_increment_features(netdev_features_t all,
6500 netdev_features_t one, netdev_features_t mask)
6502 if (mask & NETIF_F_GEN_CSUM)
6503 mask |= NETIF_F_ALL_CSUM;
6504 mask |= NETIF_F_VLAN_CHALLENGED;
6506 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6507 all &= one | ~NETIF_F_ALL_FOR_ALL;
6509 /* If one device supports hw checksumming, set for all. */
6510 if (all & NETIF_F_GEN_CSUM)
6511 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6515 EXPORT_SYMBOL(netdev_increment_features);
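/*
 * Illustrative sketch, not part of the original file: how a master device
 * (bridge/bond style) folds its slaves' feature sets together with this
 * helper, modelled on the recompute loops in those drivers.  The slave
 * array is hypothetical.
 */
static netdev_features_t __maybe_unused
example_master_features(netdev_features_t master_features,
			struct net_device **example_slaves, int count)
{
	netdev_features_t mask = master_features;
	netdev_features_t all = master_features & ~NETIF_F_ONE_FOR_ALL;
	int i;

	for (i = 0; i < count; i++)
		all = netdev_increment_features(all,
						example_slaves[i]->features,
						mask);
	return all;
}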
6517 static struct hlist_head *netdev_create_hash(void)
6520 struct hlist_head *hash;
6522 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6524 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6525 INIT_HLIST_HEAD(&hash[i]);
6530 /* Initialize per network namespace state */
6531 static int __net_init netdev_init(struct net *net)
6533 if (net != &init_net)
6534 INIT_LIST_HEAD(&net->dev_base_head);
6536 net->dev_name_head = netdev_create_hash();
6537 if (net->dev_name_head == NULL)
6540 net->dev_index_head = netdev_create_hash();
6541 if (net->dev_index_head == NULL)
6547 kfree(net->dev_name_head);
6553 * netdev_drivername - network driver for the device
6554 * @dev: network device
6556 * Determine network driver for device.
6558 const char *netdev_drivername(const struct net_device *dev)
6560 const struct device_driver *driver;
6561 const struct device *parent;
6562 const char *empty = "";
6564 parent = dev->dev.parent;
6568 driver = parent->driver;
6569 if (driver && driver->name)
6570 return driver->name;
6574 static int __netdev_printk(const char *level, const struct net_device *dev,
6575 struct va_format *vaf)
6579 if (dev && dev->dev.parent) {
6580 r = dev_printk_emit(level[1] - '0',
6583 dev_driver_string(dev->dev.parent),
6584 dev_name(dev->dev.parent),
6585 netdev_name(dev), vaf);
6587 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6589 r = printk("%s(NULL net_device): %pV", level, vaf);
6595 int netdev_printk(const char *level, const struct net_device *dev,
6596 const char *format, ...)
6598 struct va_format vaf;
6602 va_start(args, format);
6607 r = __netdev_printk(level, dev, &vaf);
6613 EXPORT_SYMBOL(netdev_printk);
6615 #define define_netdev_printk_level(func, level) \
6616 int func(const struct net_device *dev, const char *fmt, ...) \
6619 struct va_format vaf; \
6622 va_start(args, fmt); \
6627 r = __netdev_printk(level, dev, &vaf); \
6633 EXPORT_SYMBOL(func);
6635 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6636 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6637 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6638 define_netdev_printk_level(netdev_err, KERN_ERR);
6639 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6640 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6641 define_netdev_printk_level(netdev_info, KERN_INFO);
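/*
 * Illustrative sketch, not part of the original file: the level wrappers
 * generated above are used like printk() but automatically prefix the
 * driver name, bus id and interface name.
 */
static void __maybe_unused example_report_dma_error(struct net_device *dev,
						    int err)
{
	netdev_err(dev, "DMA mapping failed: %d\n", err);
}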
6643 static void __net_exit netdev_exit(struct net *net)
6645 kfree(net->dev_name_head);
6646 kfree(net->dev_index_head);
6649 static struct pernet_operations __net_initdata netdev_net_ops = {
6650 .init = netdev_init,
6651 .exit = netdev_exit,
6654 static void __net_exit default_device_exit(struct net *net)
6656 struct net_device *dev, *aux;
6658 * Push all migratable network devices back to the
6659 * initial network namespace
6662 for_each_netdev_safe(net, dev, aux) {
6664 char fb_name[IFNAMSIZ];
6666 /* Ignore unmovable devices (e.g. loopback) */
6667 if (dev->features & NETIF_F_NETNS_LOCAL)
6670 /* Leave virtual devices for the generic cleanup */
6671 if (dev->rtnl_link_ops)
6674 /* Push remaining network devices to init_net */
6675 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6676 err = dev_change_net_namespace(dev, &init_net, fb_name);
6678 pr_emerg("%s: failed to move %s to init_net: %d\n",
6679 __func__, dev->name, err);
6686 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6688 /* At exit all network devices must be removed from a network
6689 * namespace. Do this in the reverse order of registration.
6690 * Do this across as many network namespaces as possible to
6691 * improve batching efficiency.
6693 struct net_device *dev;
6695 LIST_HEAD(dev_kill_list);
6698 list_for_each_entry(net, net_list, exit_list) {
6699 for_each_netdev_reverse(net, dev) {
6700 if (dev->rtnl_link_ops)
6701 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6703 unregister_netdevice_queue(dev, &dev_kill_list);
6706 unregister_netdevice_many(&dev_kill_list);
6707 list_del(&dev_kill_list);
6711 static struct pernet_operations __net_initdata default_device_ops = {
6712 .exit = default_device_exit,
6713 .exit_batch = default_device_exit_batch,
6717 * Initialize the DEV module. At boot time this walks the device list and
6718 * unhooks any devices that fail to initialise (normally hardware not
6719 * present) and leaves us with a valid list of present and active devices.
6724 * This is called single threaded during boot, so no need
6725 * to take the rtnl semaphore.
6727 static int __init net_dev_init(void)
6729 int i, rc = -ENOMEM;
6731 BUG_ON(!dev_boot_phase);
6733 if (dev_proc_init())
6736 if (netdev_kobject_init())
6739 INIT_LIST_HEAD(&ptype_all);
6740 for (i = 0; i < PTYPE_HASH_SIZE; i++)
6741 INIT_LIST_HEAD(&ptype_base[i]);
6743 INIT_LIST_HEAD(&offload_base);
6745 if (register_pernet_subsys(&netdev_net_ops))
6749 * Initialise the packet receive queues.
6752 for_each_possible_cpu(i) {
6753 struct softnet_data *sd = &per_cpu(softnet_data, i);
6755 memset(sd, 0, sizeof(*sd));
6756 skb_queue_head_init(&sd->input_pkt_queue);
6757 skb_queue_head_init(&sd->process_queue);
6758 sd->completion_queue = NULL;
6759 INIT_LIST_HEAD(&sd->poll_list);
6760 sd->output_queue = NULL;
6761 sd->output_queue_tailp = &sd->output_queue;
6763 sd->csd.func = rps_trigger_softirq;
6769 sd->backlog.poll = process_backlog;
6770 sd->backlog.weight = weight_p;
6771 sd->backlog.gro_list = NULL;
6772 sd->backlog.gro_count = 0;
6777 /* The loopback device is special: if any other network device
6778 * is present in a network namespace, the loopback device must
6779 * be present. Since we now dynamically allocate and free the
6780 * loopback device, ensure this invariant is maintained by
6781 * keeping the loopback device as the first device on the
6782 * list of network devices. Ensure the loopback device
6783 * is the first device that appears and the last network device
6786 if (register_pernet_device(&loopback_net_ops))
6789 if (register_pernet_device(&default_device_ops))
6792 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6793 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6795 hotcpu_notifier(dev_cpu_callback, 0);
6803 subsys_initcall(net_dev_init);
6805 static int __init initialize_hashrnd(void)
6807 get_random_bytes(&hashrnd, sizeof(hashrnd));
6811 late_initcall_sync(initialize_hashrnd);