/*
 * 	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					minimum space.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates. This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets were
 *	the first on the list, it would not be able to sense that the packet
 *	is cloned and should be copied-on-write; it would
 *	change it and subsequent readers would get a broken packet.
 */
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
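
/*
 * Usage sketch (illustrative only, not part of this file): a module that
 * taps every frame, following the registration pattern used by real
 * protocols such as af_packet. The handler name and its body below are
 * hypothetical.
 *
 *	static int example_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *				   struct packet_type *pt,
 *				   struct net_device *orig_dev)
 *	{
 *		// The handler owns a reference on skb; release it when done.
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),	// a tap: see all protocols
 *		.func = example_tap_rcv,
 *	};
 *
 *	dev_add_pack(&example_tap);	// e.g. from the module init path
 *	dev_remove_pack(&example_tap);	// e.g. from the module exit path
 */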
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
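
/*
 * Usage sketch (illustrative): this mirrors how af_inet.c registers the
 * ETH_P_IP GSO/GRO callbacks in this kernel era. The inet_* names stand
 * in for a protocol's real implementations.
 *
 *	static struct packet_offload ip_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_send_check = inet_gso_send_check,
 *			.gso_segment	= inet_gso_segment,
 *			.gro_receive	= inet_gro_receive,
 *			.gro_complete	= inet_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&ip_packet_offload);
 */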
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/
/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
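
/*
 * Usage sketch (illustrative): dev_get_by_name() hands back a reference
 * the caller owns, so every successful lookup must be paired with a
 * dev_put():
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (!dev)
 *		return -ENODEV;
 *	// ... use dev; it cannot be freed underneath us ...
 *	dev_put(dev);
 */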
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and the result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names
 *	to allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans the list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
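
/*
 * Usage sketch (illustrative): a driver that wants the first free
 * "dummy%d" slot before registering its device would do:
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto fail;	// -EINVAL: bad format, -ENFILE: no free slot
 */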
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
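
/*
 * Usage sketch (illustrative): dev_open() must run under the rtnl
 * semaphore (__dev_open() asserts it), so in-kernel callers outside the
 * rtnetlink path wrap it:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */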
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		list_del_init(&dev->close_list);
	}

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	/* the same for macvlan devices */
	if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
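
/*
 * Usage sketch (illustrative, hypothetical handler name): a subsystem
 * tracking interface state would register a block like this; thanks to
 * the replay above it sees NETDEV_REGISTER/NETDEV_UP for devices that
 * already existed at registration time.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_UP:
 *			// interface came up
 *			break;
 *		case NETDEV_UNREGISTER:
 *			// drop any state kept for dev
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_netdev_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_netdev_nb);
 */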
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->protocol = eth_type_trans(skb, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
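
/*
 * Usage sketch (illustrative): this is essentially what a pair device
 * such as veth does from its ndo_start_xmit, injecting the skb into the
 * peer's receive path. "peer" and "stats" are hypothetical driver state.
 *
 *	rc = dev_forward_skb(peer, skb);
 *	if (likely(rc == NET_RX_SUCCESS))
 *		stats->tx_packets++;	// skb is now on peer's rx path
 *	else
 *		stats->tx_dropped++;	// NET_RX_DROP: skb already freed
 */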
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid and nothing can be done, so disable priority mappings. It
 * is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}
static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}

	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
#endif	/* CONFIG_XPS */
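
/*
 * Usage sketch (illustrative): a multiqueue driver pinning transmit
 * queue "qid" to a single CPU (writes to the per-queue sysfs file
 * xps_cpus end up here as well):
 *
 *	cpumask_var_t xps_mask;
 *
 *	if (!zalloc_cpumask_var(&xps_mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_set_cpu(cpu, xps_mask);
 *	err = netif_set_xps_queue(dev, xps_mask, qid);
 *	free_cpumask_var(xps_mask);
 */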
/*
 *	Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 *	greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
#ifdef CONFIG_SYSFS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
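
/*
 * Usage sketch (illustrative): drivers normally reach these through the
 * dev_kfree_skb_irq()/dev_kfree_skb_any() wrappers, e.g. when cleaning
 * a tx ring from an interrupt handler:
 *
 *	dev_kfree_skb_any(skb);		// safe in any context
 *	dev_kfree_skb_irq(skb);		// when known to be in irq context
 */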
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	unsigned int vlan_depth = skb->mac_len;
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	/* if skb->protocol is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		do {
			struct vlan_hdr *vh;

			if (unlikely(!pskb_may_pull(skb,
						    vlan_depth + VLAN_HLEN)))
				return 0;

			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (type == htons(ETH_P_8021Q) ||
			 type == htons(ETH_P_8021AD));
	}

	*depth = vlan_depth;

	return type;
}
2354 * skb_mac_gso_segment - mac layer segmentation handler.
2355 * @skb: buffer to segment
2356 * @features: features for the output path (see dev->features)
2358 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2359 netdev_features_t features)
2361 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2362 struct packet_offload *ptype;
2363 int vlan_depth = skb->mac_len;
2364 __be16 type = skb_network_protocol(skb, &vlan_depth);
2366 if (unlikely(!type))
2367 return ERR_PTR(-EINVAL);
2369 __skb_pull(skb, vlan_depth);
2372 list_for_each_entry_rcu(ptype, &offload_base, list) {
2373 if (ptype->type == type && ptype->callbacks.gso_segment) {
2374 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2377 err = ptype->callbacks.gso_send_check(skb);
2378 segs = ERR_PTR(err);
2379 if (err || skb_gso_ok(skb, features))
2381 __skb_push(skb, (skb->data -
2382 skb_network_header(skb)));
2384 segs = ptype->callbacks.gso_segment(skb, features);
2390 __skb_push(skb, skb->data - skb_mac_header(skb));
2394 EXPORT_SYMBOL(skb_mac_gso_segment);
2397 /* openvswitch calls this on rx path, so we need a different check.
2399 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2402 return skb->ip_summed != CHECKSUM_PARTIAL;
2404 return skb->ip_summed == CHECKSUM_NONE;
2408 * __skb_gso_segment - Perform segmentation on skb.
2409 * @skb: buffer to segment
2410 * @features: features for the output path (see dev->features)
2411 * @tx_path: whether it is called in TX path
2413 * This function segments the given skb and returns a list of segments.
2415 * It may return NULL if the skb requires no segmentation. This is
2416 * only possible when GSO is used for verifying header integrity.
2418 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2419 netdev_features_t features, bool tx_path)
2421 if (unlikely(skb_needs_check(skb, tx_path))) {
2424 skb_warn_bad_offload(skb);
2426 err = skb_cow_head(skb, 0);
2428 return ERR_PTR(err);
2431 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2432 SKB_GSO_CB(skb)->encap_level = 0;
2434 skb_reset_mac_header(skb);
2435 skb_reset_mac_len(skb);
2437 return skb_mac_gso_segment(skb, features);
2439 EXPORT_SYMBOL(__skb_gso_segment);
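/* A minimal usage sketch (illustrative only, compiled out): callers
 * walk the singly linked segment list and transmit each segment, much
 * as dev_gso_segment()/dev_hard_start_xmit() below do. "my_xmit_one"
 * is hypothetical.
 */
#if 0
static int my_xmit_gso(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs, *nskb;

	segs = skb_gso_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (!segs)			/* header verification only */
		return my_xmit_one(skb);

	consume_skb(skb);
	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		my_xmit_one(nskb);
	}
	return 0;
}
#endif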
2441 /* Take action when hardware reception checksum errors are detected. */
2443 void netdev_rx_csum_fault(struct net_device *dev)
2445 if (net_ratelimit()) {
2446 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2450 EXPORT_SYMBOL(netdev_rx_csum_fault);
2453 /* Actually, we should eliminate this check as soon as we know that:
2454 * 1. An IOMMU is present and can map all of the memory.
2455 * 2. No high memory really exists on this machine.
2458 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2460 #ifdef CONFIG_HIGHMEM
2462 if (!(dev->features & NETIF_F_HIGHDMA)) {
2463 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2464 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2465 if (PageHighMem(skb_frag_page(frag)))
2470 if (PCI_DMA_BUS_IS_PHYS) {
2471 struct device *pdev = dev->dev.parent;
2475 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2476 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2477 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2478 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2487 void (*destructor)(struct sk_buff *skb);
2490 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2492 static void dev_gso_skb_destructor(struct sk_buff *skb)
2494 struct dev_gso_cb *cb;
2496 kfree_skb_list(skb->next);
2499 cb = DEV_GSO_CB(skb);
2501 cb->destructor(skb);
2505 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2506 * @skb: buffer to segment
2507 * @features: device features as applicable to this skb
2509 * This function segments the given skb and stores the list of segments
2512 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2514 struct sk_buff *segs;
2516 segs = skb_gso_segment(skb, features);
2518 /* Verifying header integrity only. */
2523 return PTR_ERR(segs);
2526 DEV_GSO_CB(skb)->destructor = skb->destructor;
2527 skb->destructor = dev_gso_skb_destructor;
2532 /* If this is an MPLS offload request, verify that we are testing hardware
2533 * MPLS features instead of the standard features for the netdev.
2535 #ifdef CONFIG_NET_MPLS_GSO
2536 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2537 netdev_features_t features,
2540 if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
2541 features &= skb->dev->mpls_features;
2546 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2547 netdev_features_t features,
2554 static netdev_features_t harmonize_features(struct sk_buff *skb,
2555 netdev_features_t features)
2560 type = skb_network_protocol(skb, &tmp);
2561 features = net_mpls_features(skb, features, type);
2563 if (skb->ip_summed != CHECKSUM_NONE &&
2564 !can_checksum_protocol(features, type)) {
2565 features &= ~NETIF_F_ALL_CSUM;
2566 } else if (illegal_highdma(skb->dev, skb)) {
2567 features &= ~NETIF_F_SG;
2573 netdev_features_t netif_skb_features(struct sk_buff *skb)
2575 __be16 protocol = skb->protocol;
2576 netdev_features_t features = skb->dev->features;
2578 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2579 features &= ~NETIF_F_GSO_MASK;
2581 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
2582 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2583 protocol = veh->h_vlan_encapsulated_proto;
2584 } else if (!vlan_tx_tag_present(skb)) {
2585 return harmonize_features(skb, features);
2588 features = netdev_intersect_features(features,
2589 skb->dev->vlan_features |
2590 NETIF_F_HW_VLAN_CTAG_TX |
2591 NETIF_F_HW_VLAN_STAG_TX);
2593 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
2594 features = netdev_intersect_features(features,
2599 NETIF_F_HW_VLAN_CTAG_TX |
2600 NETIF_F_HW_VLAN_STAG_TX);
2602 return harmonize_features(skb, features);
2604 EXPORT_SYMBOL(netif_skb_features);
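/* A minimal usage sketch (illustrative only, compiled out): transmit
 * helpers compute the per-skb feature set and use it to decide between
 * software GSO and linearization, mirroring dev_hard_start_xmit() below.
 */
#if 0
static int my_prepare_xmit(struct sk_buff *skb)
{
	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features))
		return 1;	/* caller must software-segment */
	if (skb_needs_linearize(skb, features) && __skb_linearize(skb))
		return -ENOMEM;
	return 0;
}
#endif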
2606 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2607 struct netdev_queue *txq)
2609 const struct net_device_ops *ops = dev->netdev_ops;
2610 int rc = NETDEV_TX_OK;
2611 unsigned int skb_len;
2613 if (likely(!skb->next)) {
2614 netdev_features_t features;
2617 * If the device doesn't need skb->dst, release it right now while
2618 * it's hot in this CPU's cache.
2620 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2623 features = netif_skb_features(skb);
2625 if (vlan_tx_tag_present(skb) &&
2626 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2627 skb = __vlan_put_tag(skb, skb->vlan_proto,
2628 vlan_tx_tag_get(skb));
2635 /* If this is an encapsulation offload request, verify that we are
2636 * testing hardware encapsulation features instead of the standard
2637 * features for the netdev
2639 if (skb->encapsulation)
2640 features &= dev->hw_enc_features;
2642 if (netif_needs_gso(skb, features)) {
2643 if (unlikely(dev_gso_segment(skb, features)))
2648 if (skb_needs_linearize(skb, features) &&
2649 __skb_linearize(skb))
2652 /* If the packet is not checksummed and the device does not
2653 * support checksumming for this protocol, complete the
2654 * checksum here.
2656 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2657 if (skb->encapsulation)
2658 skb_set_inner_transport_header(skb,
2659 skb_checksum_start_offset(skb));
2661 skb_set_transport_header(skb,
2662 skb_checksum_start_offset(skb));
2663 if (!(features & NETIF_F_ALL_CSUM) &&
2664 skb_checksum_help(skb))
2669 if (!list_empty(&ptype_all))
2670 dev_queue_xmit_nit(skb, dev);
2673 trace_net_dev_start_xmit(skb, dev);
2674 rc = ops->ndo_start_xmit(skb, dev);
2675 trace_net_dev_xmit(skb, rc, dev, skb_len);
2676 if (rc == NETDEV_TX_OK)
2677 txq_trans_update(txq);
2683 struct sk_buff *nskb = skb->next;
2685 skb->next = nskb->next;
2688 if (!list_empty(&ptype_all))
2689 dev_queue_xmit_nit(nskb, dev);
2691 skb_len = nskb->len;
2692 trace_net_dev_start_xmit(nskb, dev);
2693 rc = ops->ndo_start_xmit(nskb, dev);
2694 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2695 if (unlikely(rc != NETDEV_TX_OK)) {
2696 if (rc & ~NETDEV_TX_MASK)
2697 goto out_kfree_gso_skb;
2698 nskb->next = skb->next;
2702 txq_trans_update(txq);
2703 if (unlikely(netif_xmit_stopped(txq) && skb->next))
2704 return NETDEV_TX_BUSY;
2705 } while (skb->next);
2708 if (likely(skb->next == NULL)) {
2709 skb->destructor = DEV_GSO_CB(skb)->destructor;
2718 EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
2720 static void qdisc_pkt_len_init(struct sk_buff *skb)
2722 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2724 qdisc_skb_cb(skb)->pkt_len = skb->len;
2726 /* To get a more precise estimate of the bytes sent on the wire,
2727 * we add the header size of every segment to pkt_len
2729 if (shinfo->gso_size) {
2730 unsigned int hdr_len;
2731 u16 gso_segs = shinfo->gso_segs;
2733 /* mac layer + network layer */
2734 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2736 /* + transport layer */
2737 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2738 hdr_len += tcp_hdrlen(skb);
2740 hdr_len += sizeof(struct udphdr);
2742 if (shinfo->gso_type & SKB_GSO_DODGY)
2743 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2746 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
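	/* Worked example (illustrative): a TSO skb carrying 40 MSS-sized
	 * segments (gso_size = 1460) behind 54 bytes of Ethernet + IPv4 +
	 * TCP headers has skb->len = 54 + 40 * 1460 = 58454, but every
	 * segment repeats those 54 bytes on the wire, so
	 * pkt_len = 58454 + (40 - 1) * 54 = 60560 bytes.
	 */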
2750 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2751 struct net_device *dev,
2752 struct netdev_queue *txq)
2754 spinlock_t *root_lock = qdisc_lock(q);
2758 qdisc_pkt_len_init(skb);
2759 qdisc_calculate_pkt_len(skb, q);
2761 * Heuristic to force contended enqueues to serialize on a
2762 * separate lock before trying to get the qdisc main lock.
2763 * This permits the __QDISC___STATE_RUNNING owner to get the lock more
2764 * often and dequeue packets faster.
2766 contended = qdisc_is_running(q);
2767 if (unlikely(contended))
2768 spin_lock(&q->busylock);
2770 spin_lock(root_lock);
2771 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2774 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2775 qdisc_run_begin(q)) {
2777 * This is a work-conserving queue; there are no old skbs
2778 * waiting to be sent out; and the qdisc is not running -
2779 * xmit the skb directly.
2781 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2784 qdisc_bstats_update(q, skb);
2786 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2787 if (unlikely(contended)) {
2788 spin_unlock(&q->busylock);
2795 rc = NET_XMIT_SUCCESS;
2798 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2799 if (qdisc_run_begin(q)) {
2800 if (unlikely(contended)) {
2801 spin_unlock(&q->busylock);
2807 spin_unlock(root_lock);
2808 if (unlikely(contended))
2809 spin_unlock(&q->busylock);
2813 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2814 static void skb_update_prio(struct sk_buff *skb)
2816 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2818 if (!skb->priority && skb->sk && map) {
2819 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2821 if (prioidx < map->priomap_len)
2822 skb->priority = map->priomap[prioidx];
2826 #define skb_update_prio(skb)
2829 static DEFINE_PER_CPU(int, xmit_recursion);
2830 #define RECURSION_LIMIT 10
2833 * dev_loopback_xmit - loop back @skb
2834 * @skb: buffer to transmit
2836 int dev_loopback_xmit(struct sk_buff *skb)
2838 skb_reset_mac_header(skb);
2839 __skb_pull(skb, skb_network_offset(skb));
2840 skb->pkt_type = PACKET_LOOPBACK;
2841 skb->ip_summed = CHECKSUM_UNNECESSARY;
2842 WARN_ON(!skb_dst(skb));
2847 EXPORT_SYMBOL(dev_loopback_xmit);
2850 * __dev_queue_xmit - transmit a buffer
2851 * @skb: buffer to transmit
2852 * @accel_priv: private data used for L2 forwarding offload
2854 * Queue a buffer for transmission to a network device. The caller must
2855 * have set the device and priority and built the buffer before calling
2856 * this function. The function can be called from an interrupt.
2858 * A negative errno code is returned on a failure. A success does not
2859 * guarantee the frame will be transmitted as it may be dropped due
2860 * to congestion or traffic shaping.
2862 * -----------------------------------------------------------------------------------
2863 * I notice this method can also return errors from the queue disciplines,
2864 * including NET_XMIT_DROP, which is a positive value. So, errors can also be positive.
2867 * Regardless of the return value, the skb is consumed, so it is currently
2868 * difficult to retry a send to this method. (You can bump the ref count
2869 * before sending to hold a reference for retry if you are careful.)
2871 * When calling this method, interrupts MUST be enabled. This is because
2872 * the BH enable code must have IRQs enabled so that it will not deadlock.
2875 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2877 struct net_device *dev = skb->dev;
2878 struct netdev_queue *txq;
2882 skb_reset_mac_header(skb);
2884 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2885 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2887 /* Disable soft irqs for various locks below. Also
2888 * stops preemption for RCU.
2892 skb_update_prio(skb);
2894 txq = netdev_pick_tx(dev, skb, accel_priv);
2895 q = rcu_dereference_bh(txq->qdisc);
2897 #ifdef CONFIG_NET_CLS_ACT
2898 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2900 trace_net_dev_queue(skb);
2902 rc = __dev_xmit_skb(skb, q, dev, txq);
2906 /* The device has no queue. Common case for software devices:
2907 loopback, all sorts of tunnels...
2909 Really, it is unlikely that netif_tx_lock protection is necessary
2910 here. (e.g. loopback and IP tunnels are clean, ignoring statistics counters.)
2912 However, it is possible that they rely on protection made by us here.
2915 Check this and take the lock. It is not prone to deadlocks.
2916 Or shortcut the noqueue qdisc; it is even simpler 8)
2918 if (dev->flags & IFF_UP) {
2919 int cpu = smp_processor_id(); /* ok because BHs are off */
2921 if (txq->xmit_lock_owner != cpu) {
2923 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2924 goto recursion_alert;
2926 HARD_TX_LOCK(dev, txq, cpu);
2928 if (!netif_xmit_stopped(txq)) {
2929 __this_cpu_inc(xmit_recursion);
2930 rc = dev_hard_start_xmit(skb, dev, txq);
2931 __this_cpu_dec(xmit_recursion);
2932 if (dev_xmit_complete(rc)) {
2933 HARD_TX_UNLOCK(dev, txq);
2937 HARD_TX_UNLOCK(dev, txq);
2938 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2941 /* Recursion is detected! It is possible, unfortunately. */
2945 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2951 rcu_read_unlock_bh();
2953 atomic_long_inc(&dev->tx_dropped);
2957 rcu_read_unlock_bh();
2961 int dev_queue_xmit(struct sk_buff *skb)
2963 return __dev_queue_xmit(skb, NULL);
2965 EXPORT_SYMBOL(dev_queue_xmit);
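/* A minimal usage sketch (illustrative only, compiled out): a sender
 * that has built an skb with the device and link-layer header set just
 * hands it over, and must treat the skb as consumed afterwards,
 * whatever the return value.
 */
#if 0
static void my_send(struct net_device *dev, struct sk_buff *skb)
{
	skb->dev = dev;
	if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS)
		pr_debug("%s: frame dropped or queued under congestion\n",
			 dev->name);
	/* no kfree_skb() here: dev_queue_xmit() consumed the skb */
}
#endif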
2967 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
2969 return __dev_queue_xmit(skb, accel_priv);
2971 EXPORT_SYMBOL(dev_queue_xmit_accel);
2974 /*=======================================================================
			Receiver routines
2976 =======================================================================*/
2978 int netdev_max_backlog __read_mostly = 1000;
2979 EXPORT_SYMBOL(netdev_max_backlog);
2981 int netdev_tstamp_prequeue __read_mostly = 1;
2982 int netdev_budget __read_mostly = 300;
2983 int weight_p __read_mostly = 64; /* old backlog weight */
2985 /* Called with irq disabled */
2986 static inline void ____napi_schedule(struct softnet_data *sd,
2987 struct napi_struct *napi)
2989 list_add_tail(&napi->poll_list, &sd->poll_list);
2990 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2995 /* One global table that all flow-based protocols share. */
2996 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2997 EXPORT_SYMBOL(rps_sock_flow_table);
2999 struct static_key rps_needed __read_mostly;
3001 static struct rps_dev_flow *
3002 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3003 struct rps_dev_flow *rflow, u16 next_cpu)
3005 if (next_cpu != RPS_NO_CPU) {
3006 #ifdef CONFIG_RFS_ACCEL
3007 struct netdev_rx_queue *rxqueue;
3008 struct rps_dev_flow_table *flow_table;
3009 struct rps_dev_flow *old_rflow;
3014 /* Should we steer this flow to a different hardware queue? */
3015 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3016 !(dev->features & NETIF_F_NTUPLE))
3018 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3019 if (rxq_index == skb_get_rx_queue(skb))
3022 rxqueue = dev->_rx + rxq_index;
3023 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3026 flow_id = skb_get_hash(skb) & flow_table->mask;
3027 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3028 rxq_index, flow_id);
3032 rflow = &flow_table->flows[flow_id];
3034 if (old_rflow->filter == rflow->filter)
3035 old_rflow->filter = RPS_NO_FILTER;
3039 per_cpu(softnet_data, next_cpu).input_queue_head;
3042 rflow->cpu = next_cpu;
3047 * get_rps_cpu is called from netif_receive_skb and returns the target
3048 * CPU from the RPS map of the receiving queue for a given skb.
3049 * rcu_read_lock must be held on entry.
3051 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3052 struct rps_dev_flow **rflowp)
3054 struct netdev_rx_queue *rxqueue;
3055 struct rps_map *map;
3056 struct rps_dev_flow_table *flow_table;
3057 struct rps_sock_flow_table *sock_flow_table;
3062 if (skb_rx_queue_recorded(skb)) {
3063 u16 index = skb_get_rx_queue(skb);
3064 if (unlikely(index >= dev->real_num_rx_queues)) {
3065 WARN_ONCE(dev->real_num_rx_queues > 1,
3066 "%s received packet on queue %u, but number "
3067 "of RX queues is %u\n",
3068 dev->name, index, dev->real_num_rx_queues);
3071 rxqueue = dev->_rx + index;
3075 map = rcu_dereference(rxqueue->rps_map);
3077 if (map->len == 1 &&
3078 !rcu_access_pointer(rxqueue->rps_flow_table)) {
3079 tcpu = map->cpus[0];
3080 if (cpu_online(tcpu))
3084 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
3088 skb_reset_network_header(skb);
3089 hash = skb_get_hash(skb);
3093 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3094 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3095 if (flow_table && sock_flow_table) {
3097 struct rps_dev_flow *rflow;
3099 rflow = &flow_table->flows[hash & flow_table->mask];
3102 next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
3105 * If the desired CPU (where last recvmsg was done) is
3106 * different from current CPU (one in the rx-queue flow
3107 * table entry), switch if one of the following holds:
3108 * - Current CPU is unset (equal to RPS_NO_CPU).
3109 * - Current CPU is offline.
3110 * - The current CPU's queue tail has advanced beyond the
3111 * last packet that was enqueued using this table entry.
3112 * This guarantees that all previous packets for the flow
3113 * have been dequeued, thus preserving in-order delivery.
3115 if (unlikely(tcpu != next_cpu) &&
3116 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3117 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3118 rflow->last_qtail)) >= 0)) {
3120 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3123 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3131 tcpu = map->cpus[((u64) hash * map->len) >> 32];
3133 if (cpu_online(tcpu)) {
3143 #ifdef CONFIG_RFS_ACCEL
3146 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3147 * @dev: Device on which the filter was set
3148 * @rxq_index: RX queue index
3149 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3150 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3152 * Drivers that implement ndo_rx_flow_steer() should periodically call
3153 * this function for each installed filter and remove the filters for
3154 * which it returns %true.
3156 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3157 u32 flow_id, u16 filter_id)
3159 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3160 struct rps_dev_flow_table *flow_table;
3161 struct rps_dev_flow *rflow;
3166 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3167 if (flow_table && flow_id <= flow_table->mask) {
3168 rflow = &flow_table->flows[flow_id];
3169 cpu = ACCESS_ONCE(rflow->cpu);
3170 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3171 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3172 rflow->last_qtail) <
3173 (int)(10 * flow_table->mask)))
3179 EXPORT_SYMBOL(rps_may_expire_flow);
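/* A minimal usage sketch (illustrative only, compiled out): a driver
 * implementing ndo_rx_flow_steer() scans its filter table from a
 * periodic worker and drops entries the stack no longer needs.
 * "my_priv"/"my_filter"/"my_remove_hw_filter" are hypothetical.
 */
#if 0
static void my_expire_rfs_filters(struct my_priv *priv, u16 rxq_index)
{
	int i;

	for (i = 0; i < priv->n_filters; i++) {
		struct my_filter *f = &priv->filters[i];

		if (f->in_use &&
		    rps_may_expire_flow(priv->netdev, rxq_index,
					f->flow_id, i)) {
			my_remove_hw_filter(priv, f);
			f->in_use = false;
		}
	}
}
#endif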
3181 #endif /* CONFIG_RFS_ACCEL */
3183 /* Called from hardirq (IPI) context */
3184 static void rps_trigger_softirq(void *data)
3186 struct softnet_data *sd = data;
3188 ____napi_schedule(sd, &sd->backlog);
3192 #endif /* CONFIG_RPS */
3195 * Check if this softnet_data structure belongs to another CPU.
3196 * If so, queue it to our IPI list and return 1
3199 static int rps_ipi_queued(struct softnet_data *sd)
3202 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3205 sd->rps_ipi_next = mysd->rps_ipi_list;
3206 mysd->rps_ipi_list = sd;
3208 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3211 #endif /* CONFIG_RPS */
3215 #ifdef CONFIG_NET_FLOW_LIMIT
3216 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3219 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3221 #ifdef CONFIG_NET_FLOW_LIMIT
3222 struct sd_flow_limit *fl;
3223 struct softnet_data *sd;
3224 unsigned int old_flow, new_flow;
3226 if (qlen < (netdev_max_backlog >> 1))
3229 sd = &__get_cpu_var(softnet_data);
3232 fl = rcu_dereference(sd->flow_limit);
3234 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3235 old_flow = fl->history[fl->history_head];
3236 fl->history[fl->history_head] = new_flow;
3239 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3241 if (likely(fl->buckets[old_flow]))
3242 fl->buckets[old_flow]--;
3244 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3256 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
3257 * queue (which may be a remote CPU's queue).
3259 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3260 unsigned int *qtail)
3262 struct softnet_data *sd;
3263 unsigned long flags;
3266 sd = &per_cpu(softnet_data, cpu);
3268 local_irq_save(flags);
3271 qlen = skb_queue_len(&sd->input_pkt_queue);
3272 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3273 if (skb_queue_len(&sd->input_pkt_queue)) {
3275 __skb_queue_tail(&sd->input_pkt_queue, skb);
3276 input_queue_tail_incr_save(sd, qtail);
3278 local_irq_restore(flags);
3279 return NET_RX_SUCCESS;
3282 /* Schedule NAPI for the backlog device.
3283 * We can use a non-atomic operation since we own the queue lock
3285 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3286 if (!rps_ipi_queued(sd))
3287 ____napi_schedule(sd, &sd->backlog);
3295 local_irq_restore(flags);
3297 atomic_long_inc(&skb->dev->rx_dropped);
3302 static int netif_rx_internal(struct sk_buff *skb)
3306 net_timestamp_check(netdev_tstamp_prequeue, skb);
3308 trace_netif_rx(skb);
3310 if (static_key_false(&rps_needed)) {
3311 struct rps_dev_flow voidflow, *rflow = &voidflow;
3317 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3319 cpu = smp_processor_id();
3321 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3329 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3336 * netif_rx - post buffer to the network code
3337 * @skb: buffer to post
3339 * This function receives a packet from a device driver and queues it for
3340 * the upper (protocol) levels to process. It always succeeds. The buffer
3341 * may be dropped during processing for congestion control or by the protocol layers.
3345 * NET_RX_SUCCESS (no congestion)
3346 * NET_RX_DROP (packet was dropped)
3350 int netif_rx(struct sk_buff *skb)
3352 trace_netif_rx_entry(skb);
3354 return netif_rx_internal(skb);
3356 EXPORT_SYMBOL(netif_rx);
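/* A minimal usage sketch (illustrative only, compiled out): a legacy,
 * non-NAPI driver builds an skb in its interrupt handler and feeds the
 * backlog through netif_rx(); "my_read_frame*" are hypothetical.
 */
#if 0
static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	int len = my_read_frame_len(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(dev, len);
	if (!skb)
		return IRQ_HANDLED;	/* frame is lost */
	my_read_frame(dev, skb_put(skb, len));
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	return IRQ_HANDLED;
}
#endif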
3358 int netif_rx_ni(struct sk_buff *skb)
3362 trace_netif_rx_ni_entry(skb);
3365 err = netif_rx_internal(skb);
3366 if (local_softirq_pending())
3372 EXPORT_SYMBOL(netif_rx_ni);
3374 static void net_tx_action(struct softirq_action *h)
3376 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3378 if (sd->completion_queue) {
3379 struct sk_buff *clist;
3381 local_irq_disable();
3382 clist = sd->completion_queue;
3383 sd->completion_queue = NULL;
3387 struct sk_buff *skb = clist;
3388 clist = clist->next;
3390 WARN_ON(atomic_read(&skb->users));
3391 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3392 trace_consume_skb(skb);
3394 trace_kfree_skb(skb, net_tx_action);
3399 if (sd->output_queue) {
3402 local_irq_disable();
3403 head = sd->output_queue;
3404 sd->output_queue = NULL;
3405 sd->output_queue_tailp = &sd->output_queue;
3409 struct Qdisc *q = head;
3410 spinlock_t *root_lock;
3412 head = head->next_sched;
3414 root_lock = qdisc_lock(q);
3415 if (spin_trylock(root_lock)) {
3416 smp_mb__before_atomic();
3417 clear_bit(__QDISC_STATE_SCHED,
3420 spin_unlock(root_lock);
3422 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3424 __netif_reschedule(q);
3426 smp_mb__before_atomic();
3427 clear_bit(__QDISC_STATE_SCHED,
3435 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3436 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3437 /* This hook is defined here for ATM LANE */
3438 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3439 unsigned char *addr) __read_mostly;
3440 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3443 #ifdef CONFIG_NET_CLS_ACT
3444 /* TODO: Maybe we should just force sch_ingress to be compiled in
3445 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
3446 * instructions (a compare and two stores) right now if we don't have
3447 * it on but do have CONFIG_NET_CLS_ACT.
3448 * NOTE: This doesn't stop any functionality; if you don't have
3449 * the ingress scheduler, you just can't add policies on ingress.
3452 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3454 struct net_device *dev = skb->dev;
3455 u32 ttl = G_TC_RTTL(skb->tc_verd);
3456 int result = TC_ACT_OK;
3459 if (unlikely(MAX_RED_LOOP < ttl++)) {
3460 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3461 skb->skb_iif, dev->ifindex);
3465 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3466 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3469 if (q != &noop_qdisc) {
3470 spin_lock(qdisc_lock(q));
3471 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3472 result = qdisc_enqueue_root(skb, q);
3473 spin_unlock(qdisc_lock(q));
3479 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3480 struct packet_type **pt_prev,
3481 int *ret, struct net_device *orig_dev)
3483 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3485 if (!rxq || rxq->qdisc == &noop_qdisc)
3489 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3493 switch (ing_filter(skb, rxq)) {
3507 * netdev_rx_handler_register - register receive handler
3508 * @dev: device to register a handler for
3509 * @rx_handler: receive handler to register
3510 * @rx_handler_data: data pointer that is used by rx handler
3512 * Register a receive handler for a device. This handler will then be
3513 * called from __netif_receive_skb. A negative errno code is returned on a failure.
3516 * The caller must hold the rtnl_mutex.
3518 * For a general description of rx_handler, see enum rx_handler_result.
3520 int netdev_rx_handler_register(struct net_device *dev,
3521 rx_handler_func_t *rx_handler,
3522 void *rx_handler_data)
3526 if (dev->rx_handler)
3529 /* Note: rx_handler_data must be set before rx_handler */
3530 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3531 rcu_assign_pointer(dev->rx_handler, rx_handler);
3535 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
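/* A minimal usage sketch (illustrative only, compiled out): this is how
 * bridge/bonding-style code claims a port device. "my_port" and the
 * steal-the-skb policy are hypothetical; see enum rx_handler_result
 * for the handler contract.
 */
#if 0
static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
{
	struct my_port *port =
		rcu_dereference((*pskb)->dev->rx_handler_data);

	(*pskb)->dev = port->upper_dev;
	return RX_HANDLER_ANOTHER;	/* reprocess on the upper device */
}

static int my_enslave(struct net_device *port_dev, struct my_port *port)
{
	ASSERT_RTNL();
	return netdev_rx_handler_register(port_dev, my_handle_frame, port);
}
#endif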
3538 * netdev_rx_handler_unregister - unregister receive handler
3539 * @dev: device to unregister a handler from
3541 * Unregister a receive handler from a device.
3543 * The caller must hold the rtnl_mutex.
3545 void netdev_rx_handler_unregister(struct net_device *dev)
3549 RCU_INIT_POINTER(dev->rx_handler, NULL);
3550 /* a reader seeing a non-NULL rx_handler in a rcu_read_lock()
3551 * section is guaranteed to also see a non-NULL rx_handler_data
3555 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3557 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3560 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3561 * the special handling of PFMEMALLOC skbs.
3563 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3565 switch (skb->protocol) {
3566 case htons(ETH_P_ARP):
3567 case htons(ETH_P_IP):
3568 case htons(ETH_P_IPV6):
3569 case htons(ETH_P_8021Q):
3570 case htons(ETH_P_8021AD):
3577 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3579 struct packet_type *ptype, *pt_prev;
3580 rx_handler_func_t *rx_handler;
3581 struct net_device *orig_dev;
3582 struct net_device *null_or_dev;
3583 bool deliver_exact = false;
3584 int ret = NET_RX_DROP;
3587 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3589 trace_netif_receive_skb(skb);
3591 orig_dev = skb->dev;
3593 skb_reset_network_header(skb);
3594 if (!skb_transport_header_was_set(skb))
3595 skb_reset_transport_header(skb);
3596 skb_reset_mac_len(skb);
3603 skb->skb_iif = skb->dev->ifindex;
3605 __this_cpu_inc(softnet_data.processed);
3607 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3608 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3609 skb = skb_vlan_untag(skb);
3614 #ifdef CONFIG_NET_CLS_ACT
3615 if (skb->tc_verd & TC_NCLS) {
3616 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3624 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3625 if (!ptype->dev || ptype->dev == skb->dev) {
3627 ret = deliver_skb(skb, pt_prev, orig_dev);
3633 #ifdef CONFIG_NET_CLS_ACT
3634 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3640 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3643 if (vlan_tx_tag_present(skb)) {
3645 ret = deliver_skb(skb, pt_prev, orig_dev);
3648 if (vlan_do_receive(&skb))
3650 else if (unlikely(!skb))
3654 rx_handler = rcu_dereference(skb->dev->rx_handler);
3657 ret = deliver_skb(skb, pt_prev, orig_dev);
3660 switch (rx_handler(&skb)) {
3661 case RX_HANDLER_CONSUMED:
3662 ret = NET_RX_SUCCESS;
3664 case RX_HANDLER_ANOTHER:
3666 case RX_HANDLER_EXACT:
3667 deliver_exact = true;
3668 case RX_HANDLER_PASS:
3675 if (unlikely(vlan_tx_tag_present(skb))) {
3676 if (vlan_tx_tag_get_id(skb))
3677 skb->pkt_type = PACKET_OTHERHOST;
3678 /* Note: we might in the future use prio bits
3679 * and set skb->priority like in vlan_do_receive().
3680 * For the time being, just ignore the Priority Code Point.
3685 /* deliver only exact match when indicated */
3686 null_or_dev = deliver_exact ? skb->dev : NULL;
3688 type = skb->protocol;
3689 list_for_each_entry_rcu(ptype,
3690 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3691 if (ptype->type == type &&
3692 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3693 ptype->dev == orig_dev)) {
3695 ret = deliver_skb(skb, pt_prev, orig_dev);
3701 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3704 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3707 atomic_long_inc(&skb->dev->rx_dropped);
3709 /* Jamal, now you will not be able to escape explaining
3710 * to me how you were going to use this. :-)
3720 static int __netif_receive_skb(struct sk_buff *skb)
3724 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3725 unsigned long pflags = current->flags;
3728 * PFMEMALLOC skbs are special, they should
3729 * - be delivered to SOCK_MEMALLOC sockets only
3730 * - stay away from userspace
3731 * - have bounded memory usage
3733 * Use PF_MEMALLOC as this saves us from propagating the allocation
3734 * context down to all allocation sites.
3736 current->flags |= PF_MEMALLOC;
3737 ret = __netif_receive_skb_core(skb, true);
3738 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3740 ret = __netif_receive_skb_core(skb, false);
3745 static int netif_receive_skb_internal(struct sk_buff *skb)
3747 net_timestamp_check(netdev_tstamp_prequeue, skb);
3749 if (skb_defer_rx_timestamp(skb))
3750 return NET_RX_SUCCESS;
3753 if (static_key_false(&rps_needed)) {
3754 struct rps_dev_flow voidflow, *rflow = &voidflow;
3759 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3762 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3769 return __netif_receive_skb(skb);
3773 * netif_receive_skb - process receive buffer from network
3774 * @skb: buffer to process
3776 * netif_receive_skb() is the main receive data processing function.
3777 * It always succeeds. The buffer may be dropped during processing
3778 * for congestion control or by the protocol layers.
3780 * This function may only be called from softirq context and interrupts
3781 * should be enabled.
3783 * Return values (usually ignored):
3784 * NET_RX_SUCCESS: no congestion
3785 * NET_RX_DROP: packet was dropped
3787 int netif_receive_skb(struct sk_buff *skb)
3789 trace_netif_receive_skb_entry(skb);
3791 return netif_receive_skb_internal(skb);
3793 EXPORT_SYMBOL(netif_receive_skb);
3795 /* Network device is going away; flush any packets still pending.
3796 * Called with irqs disabled.
3798 static void flush_backlog(void *arg)
3800 struct net_device *dev = arg;
3801 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3802 struct sk_buff *skb, *tmp;
3805 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3806 if (skb->dev == dev) {
3807 __skb_unlink(skb, &sd->input_pkt_queue);
3809 input_queue_head_incr(sd);
3814 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3815 if (skb->dev == dev) {
3816 __skb_unlink(skb, &sd->process_queue);
3818 input_queue_head_incr(sd);
3823 static int napi_gro_complete(struct sk_buff *skb)
3825 struct packet_offload *ptype;
3826 __be16 type = skb->protocol;
3827 struct list_head *head = &offload_base;
3830 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3832 if (NAPI_GRO_CB(skb)->count == 1) {
3833 skb_shinfo(skb)->gso_size = 0;
3838 list_for_each_entry_rcu(ptype, head, list) {
3839 if (ptype->type != type || !ptype->callbacks.gro_complete)
3842 err = ptype->callbacks.gro_complete(skb, 0);
3848 WARN_ON(&ptype->list == head);
3850 return NET_RX_SUCCESS;
3854 return netif_receive_skb_internal(skb);
3857 /* napi->gro_list contains packets ordered by age;
3858 * the youngest packets are at its head.
3859 * Complete skbs in reverse order to reduce latency.
3861 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3863 struct sk_buff *skb, *prev = NULL;
3865 /* scan list and build reverse chain */
3866 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3871 for (skb = prev; skb; skb = prev) {
3874 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3878 napi_gro_complete(skb);
3882 napi->gro_list = NULL;
3884 EXPORT_SYMBOL(napi_gro_flush);
3886 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3889 unsigned int maclen = skb->dev->hard_header_len;
3890 u32 hash = skb_get_hash_raw(skb);
3892 for (p = napi->gro_list; p; p = p->next) {
3893 unsigned long diffs;
3895 NAPI_GRO_CB(p)->flush = 0;
3897 if (hash != skb_get_hash_raw(p)) {
3898 NAPI_GRO_CB(p)->same_flow = 0;
3902 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3903 diffs |= p->vlan_tci ^ skb->vlan_tci;
3904 if (maclen == ETH_HLEN)
3905 diffs |= compare_ether_header(skb_mac_header(p),
3906 skb_mac_header(skb));
3908 diffs = memcmp(skb_mac_header(p),
3909 skb_mac_header(skb),
3911 NAPI_GRO_CB(p)->same_flow = !diffs;
3915 static void skb_gro_reset_offset(struct sk_buff *skb)
3917 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3918 const skb_frag_t *frag0 = &pinfo->frags[0];
3920 NAPI_GRO_CB(skb)->data_offset = 0;
3921 NAPI_GRO_CB(skb)->frag0 = NULL;
3922 NAPI_GRO_CB(skb)->frag0_len = 0;
3924 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3926 !PageHighMem(skb_frag_page(frag0))) {
3927 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3928 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3932 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
3934 struct skb_shared_info *pinfo = skb_shinfo(skb);
3936 BUG_ON(skb->end - skb->tail < grow);
3938 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3940 skb->data_len -= grow;
3943 pinfo->frags[0].page_offset += grow;
3944 skb_frag_size_sub(&pinfo->frags[0], grow);
3946 if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
3947 skb_frag_unref(skb, 0);
3948 memmove(pinfo->frags, pinfo->frags + 1,
3949 --pinfo->nr_frags * sizeof(pinfo->frags[0]));
3953 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3955 struct sk_buff **pp = NULL;
3956 struct packet_offload *ptype;
3957 __be16 type = skb->protocol;
3958 struct list_head *head = &offload_base;
3960 enum gro_result ret;
3963 if (!(skb->dev->features & NETIF_F_GRO))
3966 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3969 gro_list_prepare(napi, skb);
3970 NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
3973 list_for_each_entry_rcu(ptype, head, list) {
3974 if (ptype->type != type || !ptype->callbacks.gro_receive)
3977 skb_set_network_header(skb, skb_gro_offset(skb));
3978 skb_reset_mac_len(skb);
3979 NAPI_GRO_CB(skb)->same_flow = 0;
3980 NAPI_GRO_CB(skb)->flush = 0;
3981 NAPI_GRO_CB(skb)->free = 0;
3982 NAPI_GRO_CB(skb)->udp_mark = 0;
3984 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
3989 if (&ptype->list == head)
3992 same_flow = NAPI_GRO_CB(skb)->same_flow;
3993 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3996 struct sk_buff *nskb = *pp;
4000 napi_gro_complete(nskb);
4007 if (NAPI_GRO_CB(skb)->flush)
4010 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4011 struct sk_buff *nskb = napi->gro_list;
4013 /* locate the end of the list to select the 'oldest' flow */
4014 while (nskb->next) {
4020 napi_gro_complete(nskb);
4024 NAPI_GRO_CB(skb)->count = 1;
4025 NAPI_GRO_CB(skb)->age = jiffies;
4026 NAPI_GRO_CB(skb)->last = skb;
4027 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4028 skb->next = napi->gro_list;
4029 napi->gro_list = skb;
4033 grow = skb_gro_offset(skb) - skb_headlen(skb);
4035 gro_pull_from_frag0(skb, grow);
4044 struct packet_offload *gro_find_receive_by_type(__be16 type)
4046 struct list_head *offload_head = &offload_base;
4047 struct packet_offload *ptype;
4049 list_for_each_entry_rcu(ptype, offload_head, list) {
4050 if (ptype->type != type || !ptype->callbacks.gro_receive)
4056 EXPORT_SYMBOL(gro_find_receive_by_type);
4058 struct packet_offload *gro_find_complete_by_type(__be16 type)
4060 struct list_head *offload_head = &offload_base;
4061 struct packet_offload *ptype;
4063 list_for_each_entry_rcu(ptype, offload_head, list) {
4064 if (ptype->type != type || !ptype->callbacks.gro_complete)
4070 EXPORT_SYMBOL(gro_find_complete_by_type);
4072 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4076 if (netif_receive_skb_internal(skb))
4084 case GRO_MERGED_FREE:
4085 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4086 kmem_cache_free(skbuff_head_cache, skb);
4099 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4101 trace_napi_gro_receive_entry(skb);
4103 skb_gro_reset_offset(skb);
4105 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4107 EXPORT_SYMBOL(napi_gro_receive);
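/* A minimal usage sketch (illustrative only, compiled out): a NAPI poll
 * routine feeds received frames through GRO and completes when it does
 * less work than its budget. "my_priv"/"my_rx_frame"/"my_enable_rx_irq"
 * are hypothetical.
 */
#if 0
static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	struct sk_buff *skb;
	int work = 0;

	while (work < budget && (skb = my_rx_frame(priv))) {
		skb->protocol = eth_type_trans(skb, priv->netdev);
		napi_gro_receive(napi, skb);
		work++;
	}
	if (work < budget) {
		napi_complete(napi);
		my_enable_rx_irq(priv);
	}
	return work;
}
#endif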
4109 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4111 __skb_pull(skb, skb_headlen(skb));
4112 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4113 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4115 skb->dev = napi->dev;
4117 skb->encapsulation = 0;
4118 skb_shinfo(skb)->gso_type = 0;
4119 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4124 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4126 struct sk_buff *skb = napi->skb;
4129 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
4134 EXPORT_SYMBOL(napi_get_frags);
4136 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4137 struct sk_buff *skb,
4143 __skb_push(skb, ETH_HLEN);
4144 skb->protocol = eth_type_trans(skb, skb->dev);
4145 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4150 case GRO_MERGED_FREE:
4151 napi_reuse_skb(napi, skb);
4161 /* Upper GRO stack assumes network header starts at gro_offset=0.
4162 * Drivers could call both napi_gro_frags() and napi_gro_receive().
4163 * We copy the ethernet header into skb->data to have a common layout.
4165 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4167 struct sk_buff *skb = napi->skb;
4168 const struct ethhdr *eth;
4169 unsigned int hlen = sizeof(*eth);
4173 skb_reset_mac_header(skb);
4174 skb_gro_reset_offset(skb);
4176 eth = skb_gro_header_fast(skb, 0);
4177 if (unlikely(skb_gro_header_hard(skb, hlen))) {
4178 eth = skb_gro_header_slow(skb, hlen, 0);
4179 if (unlikely(!eth)) {
4180 napi_reuse_skb(napi, skb);
4184 gro_pull_from_frag0(skb, hlen);
4185 NAPI_GRO_CB(skb)->frag0 += hlen;
4186 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4188 __skb_pull(skb, hlen);
4191 * This works because the only protocols we care about don't require special handling.
4193 * We'll fix it up properly in napi_frags_finish()
4195 skb->protocol = eth->h_proto;
4200 gro_result_t napi_gro_frags(struct napi_struct *napi)
4202 struct sk_buff *skb = napi_frags_skb(napi);
4207 trace_napi_gro_frags_entry(skb);
4209 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4211 EXPORT_SYMBOL(napi_gro_frags);
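/* A minimal usage sketch (illustrative only, compiled out): page-based
 * drivers attach the RX page as a fragment of the skb cached in the
 * napi_struct and let napi_gro_frags() derive the protocol itself, so
 * no eth_type_trans() call is needed. Sizes/offsets are hypothetical.
 */
#if 0
static void my_receive_page(struct napi_struct *napi, struct page *page,
			    unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (!skb) {
		put_page(page);		/* drop the frame */
		return;
	}
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
			len, len);
	napi_gro_frags(napi);
}
#endif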
4214 * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
4215 * Note: called with local irq disabled, but exits with local irq enabled.
4217 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4220 struct softnet_data *remsd = sd->rps_ipi_list;
4223 sd->rps_ipi_list = NULL;
4227 /* Send pending IPIs to kick RPS processing on remote CPUs. */
4229 struct softnet_data *next = remsd->rps_ipi_next;
4231 if (cpu_online(remsd->cpu))
4232 smp_call_function_single_async(remsd->cpu,
4241 static int process_backlog(struct napi_struct *napi, int quota)
4244 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4247 /* Check if we have pending IPIs; it's better to send them now
4248 * rather than waiting for net_rx_action() to end.
4250 if (sd->rps_ipi_list) {
4251 local_irq_disable();
4252 net_rps_action_and_irq_enable(sd);
4255 napi->weight = weight_p;
4256 local_irq_disable();
4258 struct sk_buff *skb;
4260 while ((skb = __skb_dequeue(&sd->process_queue))) {
4262 __netif_receive_skb(skb);
4263 local_irq_disable();
4264 input_queue_head_incr(sd);
4265 if (++work >= quota) {
4272 if (skb_queue_empty(&sd->input_pkt_queue)) {
4274 * Inline a custom version of __napi_complete().
4275 * Only the current cpu owns and manipulates this napi,
4276 * and NAPI_STATE_SCHED is the only possible flag set on the backlog,
4278 * so we can use a plain write instead of clear_bit(),
4279 * and we don't need an smp_mb() memory barrier.
4281 list_del(&napi->poll_list);
4288 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4289 &sd->process_queue);
4298 * __napi_schedule - schedule for receive
4299 * @n: entry to schedule
4301 * The entry's receive function will be scheduled to run
4303 void __napi_schedule(struct napi_struct *n)
4305 unsigned long flags;
4307 local_irq_save(flags);
4308 ____napi_schedule(&__get_cpu_var(softnet_data), n);
4309 local_irq_restore(flags);
4311 EXPORT_SYMBOL(__napi_schedule);
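/* A minimal usage sketch (illustrative only, compiled out): the
 * interrupt half of the NAPI pattern masks further RX interrupts and
 * schedules the poll routine; napi_schedule_prep() performs the state
 * test before __napi_schedule() is called. "my_disable_rx_irq" is
 * hypothetical.
 */
#if 0
static irqreturn_t my_napi_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	if (napi_schedule_prep(&priv->napi)) {
		my_disable_rx_irq(priv);
		__napi_schedule(&priv->napi);
	}
	return IRQ_HANDLED;
}
#endif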
4313 void __napi_complete(struct napi_struct *n)
4315 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4316 BUG_ON(n->gro_list);
4318 list_del(&n->poll_list);
4319 smp_mb__before_atomic();
4320 clear_bit(NAPI_STATE_SCHED, &n->state);
4322 EXPORT_SYMBOL(__napi_complete);
4324 void napi_complete(struct napi_struct *n)
4326 unsigned long flags;
4329 * don't let napi dequeue from the cpu poll list
4330 * just in case it's running on a different cpu
4332 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4335 napi_gro_flush(n, false);
4336 local_irq_save(flags);
4338 local_irq_restore(flags);
4340 EXPORT_SYMBOL(napi_complete);
4342 /* must be called under rcu_read_lock(), as we don't take a reference */
4343 struct napi_struct *napi_by_id(unsigned int napi_id)
4345 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4346 struct napi_struct *napi;
4348 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4349 if (napi->napi_id == napi_id)
4354 EXPORT_SYMBOL_GPL(napi_by_id);
4356 void napi_hash_add(struct napi_struct *napi)
4358 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4360 spin_lock(&napi_hash_lock);
4362 /* 0 is not a valid id; we also skip an id that is already taken.
4363 * We expect both events to be extremely rare
4366 while (!napi->napi_id) {
4367 napi->napi_id = ++napi_gen_id;
4368 if (napi_by_id(napi->napi_id))
4372 hlist_add_head_rcu(&napi->napi_hash_node,
4373 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4375 spin_unlock(&napi_hash_lock);
4378 EXPORT_SYMBOL_GPL(napi_hash_add);
4380 /* Warning: the caller is responsible for making sure an RCU grace period
4381 * has elapsed before freeing the memory containing @napi
4383 void napi_hash_del(struct napi_struct *napi)
4385 spin_lock(&napi_hash_lock);
4387 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4388 hlist_del_rcu(&napi->napi_hash_node);
4390 spin_unlock(&napi_hash_lock);
4392 EXPORT_SYMBOL_GPL(napi_hash_del);
4394 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4395 int (*poll)(struct napi_struct *, int), int weight)
4397 INIT_LIST_HEAD(&napi->poll_list);
4398 napi->gro_count = 0;
4399 napi->gro_list = NULL;
4402 if (weight > NAPI_POLL_WEIGHT)
4403 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4405 napi->weight = weight;
4406 list_add(&napi->dev_list, &dev->napi_list);
4408 #ifdef CONFIG_NETPOLL
4409 spin_lock_init(&napi->poll_lock);
4410 napi->poll_owner = -1;
4412 set_bit(NAPI_STATE_SCHED, &napi->state);
4414 EXPORT_SYMBOL(netif_napi_add);
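/* A minimal usage sketch (illustrative only, compiled out): NAPI
 * contexts are registered at probe time and enabled from ndo_open;
 * "my_poll" as sketched earlier, and the napi_struct embedded in
 * "my_priv" is hypothetical.
 */
#if 0
static void my_probe_napi(struct my_priv *priv)
{
	netif_napi_add(priv->netdev, &priv->napi, my_poll,
		       NAPI_POLL_WEIGHT);
}

static int my_open(struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);

	napi_enable(&priv->napi);
	return 0;
}
#endif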
4416 void netif_napi_del(struct napi_struct *napi)
4418 list_del_init(&napi->dev_list);
4419 napi_free_frags(napi);
4421 kfree_skb_list(napi->gro_list);
4422 napi->gro_list = NULL;
4423 napi->gro_count = 0;
4425 EXPORT_SYMBOL(netif_napi_del);
4427 static void net_rx_action(struct softirq_action *h)
4429 struct softnet_data *sd = &__get_cpu_var(softnet_data);
4430 unsigned long time_limit = jiffies + 2;
4431 int budget = netdev_budget;
4434 local_irq_disable();
4436 while (!list_empty(&sd->poll_list)) {
4437 struct napi_struct *n;
4440 /* If the softirq window is exhausted then punt.
4441 * Allow this to run for 2 jiffies, which allows
4442 * an average latency of 1.5/HZ.
4444 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4449 /* Even though interrupts have been re-enabled, this
4450 * access is safe because interrupts can only add new
4451 * entries to the tail of this list, and only ->poll()
4452 * calls can remove this head entry from the list.
4454 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
4456 have = netpoll_poll_lock(n);
4460 /* This NAPI_STATE_SCHED test is for avoiding a race
4461 * with netpoll's poll_napi(). Only the entity which
4462 * obtains the lock and sees NAPI_STATE_SCHED set will
4463 * actually make the ->poll() call. Therefore we avoid
4464 * accidentally calling ->poll() when NAPI is not scheduled.
4467 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4468 work = n->poll(n, weight);
4472 WARN_ON_ONCE(work > weight);
4476 local_irq_disable();
4478 /* Drivers must not modify the NAPI state if they
4479 * consume the entire weight. In such cases this code
4480 * still "owns" the NAPI instance and therefore can
4481 * move the instance around on the list at-will.
4483 if (unlikely(work == weight)) {
4484 if (unlikely(napi_disable_pending(n))) {
4487 local_irq_disable();
4490 /* Flush packets that are too old.
4491 * If HZ < 1000, flush all packets.
4494 napi_gro_flush(n, HZ >= 1000);
4495 local_irq_disable();
4497 list_move_tail(&n->poll_list, &sd->poll_list);
4501 netpoll_poll_unlock(have);
4504 net_rps_action_and_irq_enable(sd);
4510 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4514 struct netdev_adjacent {
4515 struct net_device *dev;
4517 /* upper master flag; there can only be one master device per list
4520 /* counter for the number of times this device was added to us */
4523 /* private field for the users */
4526 struct list_head list;
4527 struct rcu_head rcu;
4530 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4531 struct net_device *adj_dev,
4532 struct list_head *adj_list)
4534 struct netdev_adjacent *adj;
4536 list_for_each_entry(adj, adj_list, list) {
4537 if (adj->dev == adj_dev)
4544 * netdev_has_upper_dev - Check if device is linked to an upper device
4546 * @upper_dev: upper device to check
4548 * Find out if a device is linked to the specified upper device and return
4549 * true if it is. Note that this checks only the immediate upper device,
4550 * not a complete stack of devices. The caller must hold the RTNL lock.
4552 bool netdev_has_upper_dev(struct net_device *dev,
4553 struct net_device *upper_dev)
4557 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
4559 EXPORT_SYMBOL(netdev_has_upper_dev);
4562 * netdev_has_any_upper_dev - Check if device is linked to some device
4565 * Find out if a device is linked to an upper device and return true
4566 * if it is. The caller must hold the RTNL lock.
4568 static bool netdev_has_any_upper_dev(struct net_device *dev)
4572 return !list_empty(&dev->all_adj_list.upper);
4576 * netdev_master_upper_dev_get - Get master upper device
4579 * Find a master upper device and return a pointer to it, or NULL if
4580 * there is none. The caller must hold the RTNL lock.
4582 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4584 struct netdev_adjacent *upper;
4588 if (list_empty(&dev->adj_list.upper))
4591 upper = list_first_entry(&dev->adj_list.upper,
4592 struct netdev_adjacent, list);
4593 if (likely(upper->master))
4597 EXPORT_SYMBOL(netdev_master_upper_dev_get);
4599 void *netdev_adjacent_get_private(struct list_head *adj_list)
4601 struct netdev_adjacent *adj;
4603 adj = list_entry(adj_list, struct netdev_adjacent, list);
4605 return adj->private;
4607 EXPORT_SYMBOL(netdev_adjacent_get_private);
4610 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4612 * @iter: list_head ** of the current position
4614 * Gets the next device from the dev's upper list, starting from iter
4615 * position. The caller must hold RCU read lock.
4617 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4618 struct list_head **iter)
4620 struct netdev_adjacent *upper;
4622 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4624 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4626 if (&upper->list == &dev->adj_list.upper)
4629 *iter = &upper->list;
4633 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
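/* A minimal usage sketch (illustrative only, compiled out): walking the
 * immediate upper devices under RCU with the iterator above.
 */
#if 0
static void my_walk_uppers(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.upper;
	struct net_device *upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
		pr_debug("%s is an upper of %s\n", upper->name, dev->name);
	rcu_read_unlock();
}
#endif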
4636 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4638 * @iter: list_head ** of the current position
4640 * Gets the next device from the dev's upper list, starting from iter
4641 * position. The caller must hold RCU read lock.
4643 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4644 struct list_head **iter)
4646 struct netdev_adjacent *upper;
4648 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4650 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4652 if (&upper->list == &dev->all_adj_list.upper)
4655 *iter = &upper->list;
4659 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
4662 * netdev_lower_get_next_private - Get the next ->private from the
4663 * lower neighbour list
4665 * @iter: list_head ** of the current position
4667 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4668 * list, starting from the iter position. The caller must either hold the
4669 * RTNL lock or its own locking that guarantees that the neighbour lower
4670 * list will remain unchanged.
4672 void *netdev_lower_get_next_private(struct net_device *dev,
4673 struct list_head **iter)
4675 struct netdev_adjacent *lower;
4677 lower = list_entry(*iter, struct netdev_adjacent, list);
4679 if (&lower->list == &dev->adj_list.lower)
4682 *iter = lower->list.next;
4684 return lower->private;
4686 EXPORT_SYMBOL(netdev_lower_get_next_private);
4689 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4690 * lower neighbour list, RCU
4693 * @iter: list_head ** of the current position
4695 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4696 * list, starting from iter position. The caller must hold RCU read lock.
4698 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4699 struct list_head **iter)
4701 struct netdev_adjacent *lower;
4703 WARN_ON_ONCE(!rcu_read_lock_held());
4705 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4707 if (&lower->list == &dev->adj_list.lower)
4710 *iter = &lower->list;
4712 return lower->private;
4714 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4717 * netdev_lower_get_next - Get the next device from the lower neighbour
4720 * @iter: list_head ** of the current position
4722 * Gets the next netdev_adjacent from the dev's lower neighbour
4723 * list, starting from the iter position. The caller must hold the RTNL
4724 * lock or its own locking that guarantees that the neighbour lower
4725 * list will remain unchanged.
4727 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4729 struct netdev_adjacent *lower;
4731 lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4733 if (&lower->list == &dev->adj_list.lower)
4736 *iter = &lower->list;
4740 EXPORT_SYMBOL(netdev_lower_get_next);
4743 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4744 * lower neighbour list, RCU
4748 * Gets the first netdev_adjacent->private from the dev's lower neighbour
4749 * list. The caller must hold RCU read lock.
4751 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4753 struct netdev_adjacent *lower;
4755 lower = list_first_or_null_rcu(&dev->adj_list.lower,
4756 struct netdev_adjacent, list);
4758 return lower->private;
4761 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4764 * netdev_master_upper_dev_get_rcu - Get master upper device
4767 * Find a master upper device and return a pointer to it, or NULL if
4768 * there is none. The caller must hold the RCU read lock.
4770 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4772 struct netdev_adjacent *upper;
4774 upper = list_first_or_null_rcu(&dev->adj_list.upper,
4775 struct netdev_adjacent, list);
4776 if (upper && likely(upper->master))
4780 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4782 static int netdev_adjacent_sysfs_add(struct net_device *dev,
4783 struct net_device *adj_dev,
4784 struct list_head *dev_list)
4786 char linkname[IFNAMSIZ+7];
4787 sprintf(linkname, dev_list == &dev->adj_list.upper ?
4788 "upper_%s" : "lower_%s", adj_dev->name);
4789 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4792 static void netdev_adjacent_sysfs_del(struct net_device *dev,
4794 struct list_head *dev_list)
4796 char linkname[IFNAMSIZ+7];
4797 sprintf(linkname, dev_list == &dev->adj_list.upper ?
4798 "upper_%s" : "lower_%s", name);
4799 sysfs_remove_link(&(dev->dev.kobj), linkname);
4802 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
4803 struct net_device *adj_dev,
4804 struct list_head *dev_list)
4806 return (dev_list == &dev->adj_list.upper ||
4807 dev_list == &dev->adj_list.lower) &&
4808 net_eq(dev_net(dev), dev_net(adj_dev));
4811 static int __netdev_adjacent_dev_insert(struct net_device *dev,
4812 struct net_device *adj_dev,
4813 struct list_head *dev_list,
4814 void *private, bool master)
4816 struct netdev_adjacent *adj;
4819 adj = __netdev_find_adj(dev, adj_dev, dev_list);
4826 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4831 adj->master = master;
4833 adj->private = private;
4836 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4837 adj_dev->name, dev->name, adj_dev->name);
4839 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
4840 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
4845 /* Ensure that master link is always the first item in list. */
4847 ret = sysfs_create_link(&(dev->dev.kobj),
4848 &(adj_dev->dev.kobj), "master");
4850 goto remove_symlinks;
4852 list_add_rcu(&adj->list, dev_list);
4854 list_add_tail_rcu(&adj->list, dev_list);
4860 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
4861 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
4869 static void __netdev_adjacent_dev_remove(struct net_device *dev,
4870 struct net_device *adj_dev,
4871 struct list_head *dev_list)
4873 struct netdev_adjacent *adj;
4875 adj = __netdev_find_adj(dev, adj_dev, dev_list);
4878 pr_err("tried to remove device %s from %s\n",
4879 dev->name, adj_dev->name);
4883 if (adj->ref_nr > 1) {
4884 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
4891 sysfs_remove_link(&(dev->dev.kobj), "master");
4893 if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
4894 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
4896 list_del_rcu(&adj->list);
4897 pr_debug("dev_put for %s, because link removed from %s to %s\n",
4898 adj_dev->name, dev->name, adj_dev->name);
4900 kfree_rcu(adj, rcu);
4903 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
4904 struct net_device *upper_dev,
4905 struct list_head *up_list,
4906 struct list_head *down_list,
4907 void *private, bool master)
4911 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
4916 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
4919 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4926 static int __netdev_adjacent_dev_link(struct net_device *dev,
4927 struct net_device *upper_dev)
4929 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
4930 &dev->all_adj_list.upper,
4931 &upper_dev->all_adj_list.lower,
4935 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
4936 struct net_device *upper_dev,
4937 struct list_head *up_list,
4938 struct list_head *down_list)
4940 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4941 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
4944 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
4945 struct net_device *upper_dev)
4947 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4948 &dev->all_adj_list.upper,
4949 &upper_dev->all_adj_list.lower);
4952 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
4953 struct net_device *upper_dev,
4954 void *private, bool master)
4956 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
4961 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
4962 &dev->adj_list.upper,
4963 &upper_dev->adj_list.lower,
4966 __netdev_adjacent_dev_unlink(dev, upper_dev);
4973 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
4974 struct net_device *upper_dev)
4976 __netdev_adjacent_dev_unlink(dev, upper_dev);
4977 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4978 &dev->adj_list.upper,
4979 &upper_dev->adj_list.lower);
4982 static int __netdev_upper_dev_link(struct net_device *dev,
4983 struct net_device *upper_dev, bool master,
4986 struct netdev_adjacent *i, *j, *to_i, *to_j;
4991 if (dev == upper_dev)
4994 /* To prevent loops, check if dev is not upper device to upper_dev. */
4995 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
4998 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
5001 if (master && netdev_master_upper_dev_get(dev))
5004 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5009 /* Now that we linked these devs, make all the upper_dev's
 * all_adj_list.upper visible to every dev's all_adj_list.lower and
 * vice versa, and don't forget the devices themselves. All of these
5012 * links are non-neighbours.
5014 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5015 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5016 pr_debug("Interlinking %s with %s, non-neighbour\n",
5017 i->dev->name, j->dev->name);
5018 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5024 /* add dev to every upper_dev's upper device */
5025 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5026 pr_debug("linking %s's upper device %s with %s\n",
5027 upper_dev->name, i->dev->name, dev->name);
5028 ret = __netdev_adjacent_dev_link(dev, i->dev);
5030 goto rollback_upper_mesh;
5033 /* add upper_dev to every dev's lower device */
5034 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5035 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5036 i->dev->name, upper_dev->name);
5037 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5039 goto rollback_lower_mesh;
5042 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5045 rollback_lower_mesh:
5047 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5050 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5055 rollback_upper_mesh:
5057 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5060 __netdev_adjacent_dev_unlink(dev, i->dev);
5068 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5069 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5070 if (i == to_i && j == to_j)
5072 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5078 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5084 * netdev_upper_dev_link - Add a link to the upper device
5086 * @upper_dev: new upper device
5088 * Adds a link to device which is upper to this one. The caller must hold
5089 * the RTNL lock. On a failure a negative errno code is returned.
5090 * On success the reference counts are adjusted and the function
5093 int netdev_upper_dev_link(struct net_device *dev,
5094 struct net_device *upper_dev)
5096 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
5098 EXPORT_SYMBOL(netdev_upper_dev_link);
5101 * netdev_master_upper_dev_link - Add a master link to the upper device
5103 * @upper_dev: new upper device
5105 * Adds a link to device which is upper to this one. In this case, only
5106 * one master upper device can be linked, although other non-master devices
5107 * might be linked as well. The caller must hold the RTNL lock.
5108 * On a failure a negative errno code is returned. On success the reference
5109 * counts are adjusted and the function returns zero.
5111 int netdev_master_upper_dev_link(struct net_device *dev,
5112 struct net_device *upper_dev)
5114 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
5116 EXPORT_SYMBOL(netdev_master_upper_dev_link);
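/* Usage sketch (illustrative only): how a bonding-style driver might
 * record a new slave; "slave_dev"/"bond_dev" are hypothetical and RTNL
 * must already be held. -EBUSY comes back if slave_dev already has a
 * master.
 *
 *	ASSERT_RTNL();
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *	if (err)
 *		return err;
 */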
5118 int netdev_master_upper_dev_link_private(struct net_device *dev,
5119 struct net_device *upper_dev,
5122 return __netdev_upper_dev_link(dev, upper_dev, true, private);
5124 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5127 * netdev_upper_dev_unlink - Removes a link to upper device
 * @upper_dev: upper device to remove the link to
5131 * Removes a link to device which is upper to this one. The caller must hold
5134 void netdev_upper_dev_unlink(struct net_device *dev,
5135 struct net_device *upper_dev)
5137 struct netdev_adjacent *i, *j;
5140 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5142 /* Here is the tricky part. We must remove all dev's lower
5143 * devices from all upper_dev's upper devices and vice
5144 * versa, to maintain the graph relationship.
5146 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5147 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5148 __netdev_adjacent_dev_unlink(i->dev, j->dev);
	/* also remove the devices themselves from the lower/upper device
5153 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5154 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5156 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5157 __netdev_adjacent_dev_unlink(dev, i->dev);
5159 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5161 EXPORT_SYMBOL(netdev_upper_dev_unlink);
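/* Usage sketch (illustrative only): the teardown path pairing the link
 * calls above; "slave_dev"/"bond_dev" are hypothetical. The adjacency
 * entries and the sysfs "master"/"upper_*"/"lower_*" links created at
 * link time are removed again, and NETDEV_CHANGEUPPER is re-sent.
 *
 *	ASSERT_RTNL();
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);
 */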
5163 void netdev_adjacent_add_links(struct net_device *dev)
5165 struct netdev_adjacent *iter;
5167 struct net *net = dev_net(dev);
5169 list_for_each_entry(iter, &dev->adj_list.upper, list) {
	if (!net_eq(net, dev_net(iter->dev)))
5172 netdev_adjacent_sysfs_add(iter->dev, dev,
5173 &iter->dev->adj_list.lower);
5174 netdev_adjacent_sysfs_add(dev, iter->dev,
5175 &dev->adj_list.upper);
5178 list_for_each_entry(iter, &dev->adj_list.lower, list) {
	if (!net_eq(net, dev_net(iter->dev)))
5181 netdev_adjacent_sysfs_add(iter->dev, dev,
5182 &iter->dev->adj_list.upper);
5183 netdev_adjacent_sysfs_add(dev, iter->dev,
5184 &dev->adj_list.lower);
5188 void netdev_adjacent_del_links(struct net_device *dev)
5190 struct netdev_adjacent *iter;
5192 struct net *net = dev_net(dev);
5194 list_for_each_entry(iter, &dev->adj_list.upper, list) {
	if (!net_eq(net, dev_net(iter->dev)))
5197 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5198 &iter->dev->adj_list.lower);
5199 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5200 &dev->adj_list.upper);
5203 list_for_each_entry(iter, &dev->adj_list.lower, list) {
	if (!net_eq(net, dev_net(iter->dev)))
5206 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5207 &iter->dev->adj_list.upper);
5208 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5209 &dev->adj_list.lower);
5213 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5215 struct netdev_adjacent *iter;
5217 struct net *net = dev_net(dev);
5219 list_for_each_entry(iter, &dev->adj_list.upper, list) {
	if (!net_eq(net, dev_net(iter->dev)))
5222 netdev_adjacent_sysfs_del(iter->dev, oldname,
5223 &iter->dev->adj_list.lower);
5224 netdev_adjacent_sysfs_add(iter->dev, dev,
5225 &iter->dev->adj_list.lower);
5228 list_for_each_entry(iter, &dev->adj_list.lower, list) {
	if (!net_eq(net, dev_net(iter->dev)))
5231 netdev_adjacent_sysfs_del(iter->dev, oldname,
5232 &iter->dev->adj_list.upper);
5233 netdev_adjacent_sysfs_add(iter->dev, dev,
5234 &iter->dev->adj_list.upper);
5238 void *netdev_lower_dev_get_private(struct net_device *dev,
5239 struct net_device *lower_dev)
5241 struct netdev_adjacent *lower;
5245 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5249 return lower->private;
5251 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5254 int dev_get_nest_level(struct net_device *dev,
5255 bool (*type_check)(struct net_device *dev))
5257 struct net_device *lower = NULL;
5258 struct list_head *iter;
5264 netdev_for_each_lower_dev(dev, lower, iter) {
5265 nest = dev_get_nest_level(lower, type_check);
5266 if (max_nest < nest)
5270 if (type_check(dev))
5275 EXPORT_SYMBOL(dev_get_nest_level);
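/* Usage sketch (illustrative only): a driver for stackable devices
 * (something macvlan-like) computing how deeply its own device type is
 * nested, typically to pick a lockdep subclass; is_my_type() and
 * my_netdev_ops are hypothetical.
 *
 *	static bool is_my_type(struct net_device *dev)
 *	{
 *		return dev->netdev_ops == &my_netdev_ops;
 *	}
 *
 *	nest = dev_get_nest_level(dev, is_my_type);
 */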
5277 static void dev_change_rx_flags(struct net_device *dev, int flags)
5279 const struct net_device_ops *ops = dev->netdev_ops;
5281 if (ops->ndo_change_rx_flags)
5282 ops->ndo_change_rx_flags(dev, flags);
5285 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5287 unsigned int old_flags = dev->flags;
5293 dev->flags |= IFF_PROMISC;
5294 dev->promiscuity += inc;
5295 if (dev->promiscuity == 0) {
		 * If inc causes an overflow, leave promiscuity untouched and return an error.
5301 dev->flags &= ~IFF_PROMISC;
5303 dev->promiscuity -= inc;
			pr_warn("%s: promiscuity counter overflowed, set promiscuity failed; promiscuity feature of device might be broken\n",
5309 if (dev->flags != old_flags) {
5310 pr_info("device %s %s promiscuous mode\n",
5312 dev->flags & IFF_PROMISC ? "entered" : "left");
5313 if (audit_enabled) {
5314 current_uid_gid(&uid, &gid);
5315 audit_log(current->audit_context, GFP_ATOMIC,
5316 AUDIT_ANOM_PROMISCUOUS,
5317 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5318 dev->name, (dev->flags & IFF_PROMISC),
5319 (old_flags & IFF_PROMISC),
5320 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5321 from_kuid(&init_user_ns, uid),
5322 from_kgid(&init_user_ns, gid),
5323 audit_get_sessionid(current));
5326 dev_change_rx_flags(dev, IFF_PROMISC);
5329 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5334 * dev_set_promiscuity - update promiscuity count on a device
5338 * Add or remove promiscuity from a device. While the count in the device
5339 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts to normal filtering operation. A negative @inc
 * value is used to drop promiscuity on the device.
5342 * Return 0 if successful or a negative errno code on error.
5344 int dev_set_promiscuity(struct net_device *dev, int inc)
5346 unsigned int old_flags = dev->flags;
5349 err = __dev_set_promiscuity(dev, inc, true);
5352 if (dev->flags != old_flags)
5353 dev_set_rx_mode(dev);
5356 EXPORT_SYMBOL(dev_set_promiscuity);
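/* Usage sketch (illustrative only): a packet-capture style user takes
 * one promiscuity reference while active and drops it on teardown;
 * calls must stay balanced so the per-device counter can return to
 * zero. dev_set_allmulti() below follows the same counting discipline.
 *
 *	err = dev_set_promiscuity(dev, 1);
 *	if (err < 0)
 *		return err;
 *	...
 *	dev_set_promiscuity(dev, -1);
 */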
5358 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5360 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5364 dev->flags |= IFF_ALLMULTI;
5365 dev->allmulti += inc;
5366 if (dev->allmulti == 0) {
		 * If inc causes an overflow, leave allmulti untouched and return an error.
5372 dev->flags &= ~IFF_ALLMULTI;
5374 dev->allmulti -= inc;
			pr_warn("%s: allmulti counter overflowed, set allmulti failed; allmulti feature of device might be broken\n",
5380 if (dev->flags ^ old_flags) {
5381 dev_change_rx_flags(dev, IFF_ALLMULTI);
5382 dev_set_rx_mode(dev);
5384 __dev_notify_flags(dev, old_flags,
5385 dev->gflags ^ old_gflags);
5391 * dev_set_allmulti - update allmulti count on a device
5395 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface keeps listening
 * to all multicast frames. Once it hits zero the device reverts to normal
 * filtering operation. A negative @inc value is used to drop the counter
5399 * when releasing a resource needing all multicasts.
5400 * Return 0 if successful or a negative errno code on error.
5403 int dev_set_allmulti(struct net_device *dev, int inc)
5405 return __dev_set_allmulti(dev, inc, true);
5407 EXPORT_SYMBOL(dev_set_allmulti);
5410 * Upload unicast and multicast address lists to device and
5411 * configure RX filtering. When the device doesn't support unicast
5412 * filtering it is put in promiscuous mode while unicast addresses
5415 void __dev_set_rx_mode(struct net_device *dev)
5417 const struct net_device_ops *ops = dev->netdev_ops;
5419 /* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
5423 if (!netif_device_present(dev))
5426 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast address changes may only happen under the rtnl,
5428 * therefore calling __dev_set_promiscuity here is safe.
5430 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5431 __dev_set_promiscuity(dev, 1, false);
5432 dev->uc_promisc = true;
5433 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5434 __dev_set_promiscuity(dev, -1, false);
5435 dev->uc_promisc = false;
5439 if (ops->ndo_set_rx_mode)
5440 ops->ndo_set_rx_mode(dev);
5443 void dev_set_rx_mode(struct net_device *dev)
5445 netif_addr_lock_bh(dev);
5446 __dev_set_rx_mode(dev);
5447 netif_addr_unlock_bh(dev);
5451 * dev_get_flags - get flags reported to userspace
5454 * Get the combination of flag bits exported through APIs to userspace.
5456 unsigned int dev_get_flags(const struct net_device *dev)
5460 flags = (dev->flags & ~(IFF_PROMISC |
5465 (dev->gflags & (IFF_PROMISC |
5468 if (netif_running(dev)) {
5469 if (netif_oper_up(dev))
5470 flags |= IFF_RUNNING;
5471 if (netif_carrier_ok(dev))
5472 flags |= IFF_LOWER_UP;
5473 if (netif_dormant(dev))
5474 flags |= IFF_DORMANT;
5479 EXPORT_SYMBOL(dev_get_flags);
5481 int __dev_change_flags(struct net_device *dev, unsigned int flags)
5483 unsigned int old_flags = dev->flags;
5489 * Set the flags on our device.
5492 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5493 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5495 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
	 * Load in the correct multicast list now that the flags have changed.
5502 if ((old_flags ^ flags) & IFF_MULTICAST)
5503 dev_change_rx_flags(dev, IFF_MULTICAST);
5505 dev_set_rx_mode(dev);
	 * Have we downed the interface? We handle IFF_UP ourselves
5509 * according to user attempts to set it, rather than blindly
5514 if ((old_flags ^ flags) & IFF_UP)
5515 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
5517 if ((flags ^ dev->gflags) & IFF_PROMISC) {
5518 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5519 unsigned int old_flags = dev->flags;
5521 dev->gflags ^= IFF_PROMISC;
5523 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5524 if (dev->flags != old_flags)
5525 dev_set_rx_mode(dev);
	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC when
	   IFF_ALLMULTI is requested, without asking us and without reporting.
5532 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
5533 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5535 dev->gflags ^= IFF_ALLMULTI;
5536 __dev_set_allmulti(dev, inc, false);
5542 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5543 unsigned int gchanges)
5545 unsigned int changes = dev->flags ^ old_flags;
5548 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
5550 if (changes & IFF_UP) {
5551 if (dev->flags & IFF_UP)
5552 call_netdevice_notifiers(NETDEV_UP, dev);
5554 call_netdevice_notifiers(NETDEV_DOWN, dev);
5557 if (dev->flags & IFF_UP &&
5558 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5559 struct netdev_notifier_change_info change_info;
5561 change_info.flags_changed = changes;
5562 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5568 * dev_change_flags - change device settings
5570 * @flags: device state flags
 * Change settings on a device based on state flags. The flags are
5573 * in the userspace exported format.
5575 int dev_change_flags(struct net_device *dev, unsigned int flags)
5578 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
5580 ret = __dev_change_flags(dev, flags);
5584 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
5585 __dev_notify_flags(dev, old_flags, changes);
5588 EXPORT_SYMBOL(dev_change_flags);
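/* Usage sketch (illustrative only): bringing an interface up from
 * kernel code, roughly what "ip link set dev X up" ends up doing;
 * RTNL must be held so the flags cannot change underneath us.
 *
 *	ASSERT_RTNL();
 *	err = dev_change_flags(dev, dev->flags | IFF_UP);
 */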
5590 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5592 const struct net_device_ops *ops = dev->netdev_ops;
5594 if (ops->ndo_change_mtu)
5595 return ops->ndo_change_mtu(dev, new_mtu);
5602 * dev_set_mtu - Change maximum transfer unit
5604 * @new_mtu: new transfer unit
5606 * Change the maximum transfer size of the network device.
5608 int dev_set_mtu(struct net_device *dev, int new_mtu)
5612 if (new_mtu == dev->mtu)
5615 /* MTU must be positive. */
5619 if (!netif_device_present(dev))
5622 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5623 err = notifier_to_errno(err);
5627 orig_mtu = dev->mtu;
5628 err = __dev_set_mtu(dev, new_mtu);
5631 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5632 err = notifier_to_errno(err);
5634 /* setting mtu back and notifying everyone again,
5635 * so that they have a chance to revert changes.
5637 __dev_set_mtu(dev, orig_mtu);
5638 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5643 EXPORT_SYMBOL(dev_set_mtu);
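/* Usage sketch (illustrative only): changing the MTU from kernel code
 * under RTNL; if a notifier vetoes the change or the driver fails, the
 * old MTU is restored and a negative errno comes back.
 *
 *	ASSERT_RTNL();
 *	err = dev_set_mtu(dev, 1400);
 *	if (err)
 *		netdev_err(dev, "failed to set MTU: %d\n", err);
 */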
5646 * dev_set_group - Change group this device belongs to
5648 * @new_group: group this device should belong to
5650 void dev_set_group(struct net_device *dev, int new_group)
5652 dev->group = new_group;
5654 EXPORT_SYMBOL(dev_set_group);
5657 * dev_set_mac_address - Change Media Access Control Address
5661 * Change the hardware (MAC) address of the device
5663 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5665 const struct net_device_ops *ops = dev->netdev_ops;
5668 if (!ops->ndo_set_mac_address)
5670 if (sa->sa_family != dev->type)
5672 if (!netif_device_present(dev))
5674 err = ops->ndo_set_mac_address(dev, sa);
5677 dev->addr_assign_type = NET_ADDR_SET;
5678 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5679 add_device_randomness(dev->dev_addr, dev->addr_len);
5682 EXPORT_SYMBOL(dev_set_mac_address);
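/* Usage sketch (illustrative only): setting a MAC address from kernel
 * code; sa_family must carry the device's ARPHRD type, and "new_mac"
 * is a hypothetical buffer of dev->addr_len bytes.
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, dev->addr_len);
 *	err = dev_set_mac_address(dev, &sa);
 */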
5685 * dev_change_carrier - Change device carrier
5687 * @new_carrier: new value
5689 * Change device carrier
5691 int dev_change_carrier(struct net_device *dev, bool new_carrier)
5693 const struct net_device_ops *ops = dev->netdev_ops;
5695 if (!ops->ndo_change_carrier)
5697 if (!netif_device_present(dev))
5699 return ops->ndo_change_carrier(dev, new_carrier);
5701 EXPORT_SYMBOL(dev_change_carrier);
5704 * dev_get_phys_port_id - Get device physical port ID
5708 * Get device physical port ID
5710 int dev_get_phys_port_id(struct net_device *dev,
5711 struct netdev_phys_port_id *ppid)
5713 const struct net_device_ops *ops = dev->netdev_ops;
5715 if (!ops->ndo_get_phys_port_id)
5717 return ops->ndo_get_phys_port_id(dev, ppid);
5719 EXPORT_SYMBOL(dev_get_phys_port_id);
5722 * dev_new_index - allocate an ifindex
5723 * @net: the applicable net namespace
5725 * Returns a suitable unique value for a new device interface
5726 * number. The caller must hold the rtnl semaphore or the
5727 * dev_base_lock to be sure it remains unique.
5729 static int dev_new_index(struct net *net)
5731 int ifindex = net->ifindex;
5735 if (!__dev_get_by_index(net, ifindex))
5736 return net->ifindex = ifindex;
5740 /* Delayed registration/unregisteration */
5741 static LIST_HEAD(net_todo_list);
5742 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5744 static void net_set_todo(struct net_device *dev)
5746 list_add_tail(&dev->todo_list, &net_todo_list);
5747 dev_net(dev)->dev_unreg_count++;
5750 static void rollback_registered_many(struct list_head *head)
5752 struct net_device *dev, *tmp;
5753 LIST_HEAD(close_head);
5755 BUG_ON(dev_boot_phase);
5758 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5759 /* Some devices call without registering
5760 * for initialization unwind. Remove those
5761 * devices and proceed with the remaining.
5763 if (dev->reg_state == NETREG_UNINITIALIZED) {
5764 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5768 list_del(&dev->unreg_list);
5771 dev->dismantle = true;
5772 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5775 /* If device is running, close it first. */
5776 list_for_each_entry(dev, head, unreg_list)
5777 list_add_tail(&dev->close_list, &close_head);
5778 dev_close_many(&close_head);
5780 list_for_each_entry(dev, head, unreg_list) {
5781 /* And unlink it from device chain. */
5782 unlist_netdevice(dev);
5784 dev->reg_state = NETREG_UNREGISTERING;
5789 list_for_each_entry(dev, head, unreg_list) {
5790 /* Shutdown queueing discipline. */
		/* Notify protocols that we are about to destroy
		   this device. They should clean all the things.
5797 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5800 * Flush the unicast and multicast chains
5805 if (dev->netdev_ops->ndo_uninit)
5806 dev->netdev_ops->ndo_uninit(dev);
5808 if (!dev->rtnl_link_ops ||
5809 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5810 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
		/* Notifier chain MUST detach us from all upper devices. */
5813 WARN_ON(netdev_has_any_upper_dev(dev));
5815 /* Remove entries from kobject tree */
5816 netdev_unregister_kobject(dev);
5818 /* Remove XPS queueing entries */
5819 netif_reset_xps_queues_gt(dev, 0);
5825 list_for_each_entry(dev, head, unreg_list)
5829 static void rollback_registered(struct net_device *dev)
5833 list_add(&dev->unreg_list, &single);
5834 rollback_registered_many(&single);
5838 static netdev_features_t netdev_fix_features(struct net_device *dev,
5839 netdev_features_t features)
5841 /* Fix illegal checksum combinations */
5842 if ((features & NETIF_F_HW_CSUM) &&
5843 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5844 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5845 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5848 /* TSO requires that SG is present as well. */
5849 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5850 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5851 features &= ~NETIF_F_ALL_TSO;
5854 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5855 !(features & NETIF_F_IP_CSUM)) {
5856 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5857 features &= ~NETIF_F_TSO;
5858 features &= ~NETIF_F_TSO_ECN;
5861 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5862 !(features & NETIF_F_IPV6_CSUM)) {
5863 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5864 features &= ~NETIF_F_TSO6;
5867 /* TSO ECN requires that TSO is present as well. */
5868 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5869 features &= ~NETIF_F_TSO_ECN;
5871 /* Software GSO depends on SG. */
5872 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5873 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5874 features &= ~NETIF_F_GSO;
5877 /* UFO needs SG and checksumming */
5878 if (features & NETIF_F_UFO) {
5879 /* maybe split UFO into V4 and V6? */
5880 if (!((features & NETIF_F_GEN_CSUM) ||
5881 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5882 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5884 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5885 features &= ~NETIF_F_UFO;
5888 if (!(features & NETIF_F_SG)) {
5890 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5891 features &= ~NETIF_F_UFO;
5895 #ifdef CONFIG_NET_RX_BUSY_POLL
5896 if (dev->netdev_ops->ndo_busy_poll)
5897 features |= NETIF_F_BUSY_POLL;
5900 features &= ~NETIF_F_BUSY_POLL;
5905 int __netdev_update_features(struct net_device *dev)
5907 netdev_features_t features;
5912 features = netdev_get_wanted_features(dev);
5914 if (dev->netdev_ops->ndo_fix_features)
5915 features = dev->netdev_ops->ndo_fix_features(dev, features);
5917 /* driver might be less strict about feature dependencies */
5918 features = netdev_fix_features(dev, features);
5920 if (dev->features == features)
5923 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5924 &dev->features, &features);
5926 if (dev->netdev_ops->ndo_set_features)
5927 err = dev->netdev_ops->ndo_set_features(dev, features);
5929 if (unlikely(err < 0)) {
5931 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5932 err, &features, &dev->features);
5937 dev->features = features;
5943 * netdev_update_features - recalculate device features
5944 * @dev: the device to check
5946 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver- or hardware-dependent
 * conditions that influence the features might have changed.
5950 void netdev_update_features(struct net_device *dev)
5952 if (__netdev_update_features(dev))
5953 netdev_features_change(dev);
5955 EXPORT_SYMBOL(netdev_update_features);
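/* Usage sketch (illustrative only): a driver whose ndo_fix_features()
 * callback depends on the MTU re-evaluates the feature set from its
 * ndo_change_mtu() implementation; this runs under RTNL, so
 * dev->features is stable across the check.
 *
 *	dev->mtu = new_mtu;
 *	netdev_update_features(dev);
 */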
5958 * netdev_change_features - recalculate device features
5959 * @dev: the device to check
5961 * Recalculate dev->features set and send notifications even
5962 * if they have not changed. Should be called instead of
 * netdev_update_features() if dev->vlan_features might also have
 * changed, to allow the changes to be propagated to stacked
5967 void netdev_change_features(struct net_device *dev)
5969 __netdev_update_features(dev);
5970 netdev_features_change(dev);
5972 EXPORT_SYMBOL(netdev_change_features);
5975 * netif_stacked_transfer_operstate - transfer operstate
5976 * @rootdev: the root or lower level device to transfer state from
5977 * @dev: the device to transfer operstate to
5979 * Transfer operational state from root to device. This is normally
5980 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
5983 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5984 struct net_device *dev)
5986 if (rootdev->operstate == IF_OPER_DORMANT)
5987 netif_dormant_on(dev);
5989 netif_dormant_off(dev);
5991 if (netif_carrier_ok(rootdev)) {
5992 if (!netif_carrier_ok(dev))
5993 netif_carrier_on(dev);
5995 if (netif_carrier_ok(dev))
5996 netif_carrier_off(dev);
5999 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6002 static int netif_alloc_rx_queues(struct net_device *dev)
6004 unsigned int i, count = dev->num_rx_queues;
6005 struct netdev_rx_queue *rx;
6009 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
6015 for (i = 0; i < count; i++)
6021 static void netdev_init_one_queue(struct net_device *dev,
6022 struct netdev_queue *queue, void *_unused)
6024 /* Initialize queue lock */
6025 spin_lock_init(&queue->_xmit_lock);
6026 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6027 queue->xmit_lock_owner = -1;
6028 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6031 dql_init(&queue->dql, HZ);
6035 static void netif_free_tx_queues(struct net_device *dev)
6040 static int netif_alloc_netdev_queues(struct net_device *dev)
6042 unsigned int count = dev->num_tx_queues;
6043 struct netdev_queue *tx;
6044 size_t sz = count * sizeof(*tx);
6046 BUG_ON(count < 1 || count > 0xffff);
6048 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6056 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6057 spin_lock_init(&dev->tx_global_lock);
6063 * register_netdevice - register a network device
6064 * @dev: device to register
6066 * Take a completed network device structure and add it to the kernel
6067 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6068 * chain. 0 is returned on success. A negative errno code is returned
6069 * on a failure to set up the device, or if the name is a duplicate.
6071 * Callers must hold the rtnl semaphore. You may want
6072 * register_netdev() instead of this.
6075 * The locking appears insufficient to guarantee two parallel registers
6076 * will not get the same name.
6079 int register_netdevice(struct net_device *dev)
6082 struct net *net = dev_net(dev);
6084 BUG_ON(dev_boot_phase);
6089 /* When net_device's are persistent, this will be fatal. */
6090 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6093 spin_lock_init(&dev->addr_list_lock);
6094 netdev_set_addr_lockdep_class(dev);
6098 ret = dev_get_valid_name(net, dev, dev->name);
6102 /* Init, if this function is available */
6103 if (dev->netdev_ops->ndo_init) {
6104 ret = dev->netdev_ops->ndo_init(dev);
6112 if (((dev->hw_features | dev->features) &
6113 NETIF_F_HW_VLAN_CTAG_FILTER) &&
6114 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6115 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6116 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6123 dev->ifindex = dev_new_index(net);
6124 else if (__dev_get_by_index(net, dev->ifindex))
6127 if (dev->iflink == -1)
6128 dev->iflink = dev->ifindex;
6130 /* Transfer changeable features to wanted_features and enable
6131 * software offloads (GSO and GRO).
6133 dev->hw_features |= NETIF_F_SOFT_FEATURES;
6134 dev->features |= NETIF_F_SOFT_FEATURES;
6135 dev->wanted_features = dev->features & dev->hw_features;
6137 if (!(dev->flags & IFF_LOOPBACK)) {
6138 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6141 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6143 dev->vlan_features |= NETIF_F_HIGHDMA;
6145 /* Make NETIF_F_SG inheritable to tunnel devices.
6147 dev->hw_enc_features |= NETIF_F_SG;
6149 /* Make NETIF_F_SG inheritable to MPLS.
6151 dev->mpls_features |= NETIF_F_SG;
6153 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6154 ret = notifier_to_errno(ret);
6158 ret = netdev_register_kobject(dev);
6161 dev->reg_state = NETREG_REGISTERED;
6163 __netdev_update_features(dev);
	 *	Default initial state at registration is that the
6167 * device is present.
6170 set_bit(__LINK_STATE_PRESENT, &dev->state);
6172 linkwatch_init_dev(dev);
6174 dev_init_scheduler(dev);
6176 list_netdevice(dev);
6177 add_device_randomness(dev->dev_addr, dev->addr_len);
	/* If the device has a permanent device address, the driver should
	 * set dev_addr and leave addr_assign_type at NET_ADDR_PERM
	 * (the default value).
6183 if (dev->addr_assign_type == NET_ADDR_PERM)
6184 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6186 /* Notify protocols, that a new device appeared. */
6187 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
6188 ret = notifier_to_errno(ret);
6190 rollback_registered(dev);
6191 dev->reg_state = NETREG_UNREGISTERED;
6194 * Prevent userspace races by waiting until the network
	 * device is fully set up before sending notifications.
6197 if (!dev->rtnl_link_ops ||
6198 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6199 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6205 if (dev->netdev_ops->ndo_uninit)
6206 dev->netdev_ops->ndo_uninit(dev);
6209 EXPORT_SYMBOL(register_netdevice);
6212 * init_dummy_netdev - init a dummy network device for NAPI
6213 * @dev: device to init
 * This takes a network device structure and initializes the minimum
 * number of fields so it can be used to schedule NAPI polls without
6217 * registering a full blown interface. This is to be used by drivers
6218 * that need to tie several hardware interfaces to a single NAPI
6219 * poll scheduler due to HW limitations.
6221 int init_dummy_netdev(struct net_device *dev)
6223 /* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
6225 * NAPI code and this dummy netdev is supposed to be
6226 * only ever used for NAPI polls
6228 memset(dev, 0, sizeof(struct net_device));
6230 /* make sure we BUG if trying to hit standard
6231 * register/unregister code path
6233 dev->reg_state = NETREG_DUMMY;
6235 /* NAPI wants this */
6236 INIT_LIST_HEAD(&dev->napi_list);
6238 /* a dummy interface is started by default */
6239 set_bit(__LINK_STATE_PRESENT, &dev->state);
6240 set_bit(__LINK_STATE_START, &dev->state);
	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
6249 EXPORT_SYMBOL_GPL(init_dummy_netdev);
6253 * register_netdev - register a network device
6254 * @dev: device to register
6256 * Take a completed network device structure and add it to the kernel
6257 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6258 * chain. 0 is returned on success. A negative errno code is returned
6259 * on a failure to set up the device, or if the name is a duplicate.
6261 * This is a wrapper around register_netdevice that takes the rtnl semaphore
6262 * and expands the device name if you passed a format string to
6265 int register_netdev(struct net_device *dev)
6270 err = register_netdevice(dev);
6274 EXPORT_SYMBOL(register_netdev);
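/* Usage sketch (illustrative only): the classic probe-time pattern;
 * my_setup() and struct my_priv are hypothetical driver pieces, and
 * the "my%d" format string is expanded at registration time.
 *
 *	dev = alloc_netdev(sizeof(struct my_priv), "my%d",
 *			   NET_NAME_UNKNOWN, my_setup);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */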
6276 int netdev_refcnt_read(const struct net_device *dev)
6280 for_each_possible_cpu(i)
6281 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6284 EXPORT_SYMBOL(netdev_refcnt_read);
6287 * netdev_wait_allrefs - wait until all references are gone.
6288 * @dev: target net_device
6290 * This is called when unregistering network devices.
6292 * Any protocol or device that holds a reference should register
6293 * for netdevice notification, and cleanup and put back the
6294 * reference if they receive an UNREGISTER event.
6295 * We can get stuck here if buggy protocols don't correctly
6298 static void netdev_wait_allrefs(struct net_device *dev)
6300 unsigned long rebroadcast_time, warning_time;
6303 linkwatch_forget_dev(dev);
6305 rebroadcast_time = warning_time = jiffies;
6306 refcnt = netdev_refcnt_read(dev);
6308 while (refcnt != 0) {
6309 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6312 /* Rebroadcast unregister notification */
6313 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6319 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6320 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6322 /* We must not have linkwatch events
6323 * pending on unregister. If this
6324 * happens, we simply run the queue
6325 * unscheduled, resulting in a noop
6328 linkwatch_run_queue();
6333 rebroadcast_time = jiffies;
6338 refcnt = netdev_refcnt_read(dev);
6340 if (time_after(jiffies, warning_time + 10 * HZ)) {
6341 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6343 warning_time = jiffies;
6352 * register_netdevice(x1);
6353 * register_netdevice(x2);
6355 * unregister_netdevice(y1);
6356 * unregister_netdevice(y2);
6362 * We are invoked by rtnl_unlock().
6363 * This allows us to deal with problems:
6364 * 1) We can delete sysfs objects which invoke hotplug
6365 * without deadlocking with linkwatch via keventd.
6366 * 2) Since we run with the RTNL semaphore not held, we can sleep
6367 * safely in order to wait for the netdev refcnt to drop to zero.
6369 * We must not return until all unregister events added during
6370 * the interval the lock was held have been completed.
6372 void netdev_run_todo(void)
6374 struct list_head list;
6376 /* Snapshot list, allow later requests */
6377 list_replace_init(&net_todo_list, &list);
6382 /* Wait for rcu callbacks to finish before next phase */
6383 if (!list_empty(&list))
6386 while (!list_empty(&list)) {
6387 struct net_device *dev
6388 = list_first_entry(&list, struct net_device, todo_list);
6389 list_del(&dev->todo_list);
6392 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6395 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
6396 pr_err("network todo '%s' but state %d\n",
6397 dev->name, dev->reg_state);
6402 dev->reg_state = NETREG_UNREGISTERED;
6404 on_each_cpu(flush_backlog, dev, 1);
6406 netdev_wait_allrefs(dev);
6409 BUG_ON(netdev_refcnt_read(dev));
6410 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6411 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6412 WARN_ON(dev->dn_ptr);
6414 if (dev->destructor)
6415 dev->destructor(dev);
		/* Report that a network device has been unregistered */
6419 dev_net(dev)->dev_unreg_count--;
6421 wake_up(&netdev_unregistering_wq);
6423 /* Free network device */
6424 kobject_put(&dev->dev.kobj);
6428 /* Convert net_device_stats to rtnl_link_stats64. They have the same
6429 * fields in the same order, with only the type differing.
6431 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6432 const struct net_device_stats *netdev_stats)
6434 #if BITS_PER_LONG == 64
6435 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6436 memcpy(stats64, netdev_stats, sizeof(*stats64));
6438 size_t i, n = sizeof(*stats64) / sizeof(u64);
6439 const unsigned long *src = (const unsigned long *)netdev_stats;
6440 u64 *dst = (u64 *)stats64;
6442 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6443 sizeof(*stats64) / sizeof(u64));
6444 for (i = 0; i < n; i++)
6448 EXPORT_SYMBOL(netdev_stats_to_stats64);
6451 * dev_get_stats - get network device statistics
6452 * @dev: device to get statistics from
6453 * @storage: place to store stats
6455 * Get network statistics from device. Return @storage.
6456 * The device driver may provide its own method by setting
6457 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6458 * otherwise the internal statistics structure is used.
6460 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6461 struct rtnl_link_stats64 *storage)
6463 const struct net_device_ops *ops = dev->netdev_ops;
6465 if (ops->ndo_get_stats64) {
6466 memset(storage, 0, sizeof(*storage));
6467 ops->ndo_get_stats64(dev, storage);
6468 } else if (ops->ndo_get_stats) {
6469 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6471 netdev_stats_to_stats64(storage, &dev->stats);
6473 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6474 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
6477 EXPORT_SYMBOL(dev_get_stats);
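/* Usage sketch (illustrative only): reading totals without caring
 * which of the three stats methods the driver implements; the
 * structure can live on the caller's stack.
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	pr_info("%s: %llu packets received\n", dev->name,
 *		(unsigned long long)stats.rx_packets);
 */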
6479 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6481 struct netdev_queue *queue = dev_ingress_queue(dev);
6483 #ifdef CONFIG_NET_CLS_ACT
6486 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6489 netdev_init_one_queue(dev, queue, NULL);
6490 queue->qdisc = &noop_qdisc;
6491 queue->qdisc_sleeping = &noop_qdisc;
6492 rcu_assign_pointer(dev->ingress_queue, queue);
6497 static const struct ethtool_ops default_ethtool_ops;
6499 void netdev_set_default_ethtool_ops(struct net_device *dev,
6500 const struct ethtool_ops *ops)
6502 if (dev->ethtool_ops == &default_ethtool_ops)
6503 dev->ethtool_ops = ops;
6505 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6507 void netdev_freemem(struct net_device *dev)
6509 char *addr = (char *)dev - dev->padded;
6515 * alloc_netdev_mqs - allocate network device
6516 * @sizeof_priv: size of private data to allocate space for
6517 * @name: device name format string
6518 * @name_assign_type: origin of device name
6519 * @setup: callback to initialize device
6520 * @txqs: the number of TX subqueues to allocate
6521 * @rxqs: the number of RX subqueues to allocate
6523 * Allocates a struct net_device with private data area for driver use
6524 * and performs basic initialization. Also allocates subqueue structs
6525 * for each queue on the device.
6527 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6528 unsigned char name_assign_type,
6529 void (*setup)(struct net_device *),
6530 unsigned int txqs, unsigned int rxqs)
6532 struct net_device *dev;
6534 struct net_device *p;
6536 BUG_ON(strlen(name) >= sizeof(dev->name));
6539 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6545 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6550 alloc_size = sizeof(struct net_device);
6552 /* ensure 32-byte alignment of private area */
6553 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6554 alloc_size += sizeof_priv;
6556 /* ensure 32-byte alignment of whole construct */
6557 alloc_size += NETDEV_ALIGN - 1;
6559 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6561 p = vzalloc(alloc_size);
6565 dev = PTR_ALIGN(p, NETDEV_ALIGN);
6566 dev->padded = (char *)dev - (char *)p;
6568 dev->pcpu_refcnt = alloc_percpu(int);
6569 if (!dev->pcpu_refcnt)
6572 if (dev_addr_init(dev))
6578 dev_net_set(dev, &init_net);
6580 dev->gso_max_size = GSO_MAX_SIZE;
6581 dev->gso_max_segs = GSO_MAX_SEGS;
6583 INIT_LIST_HEAD(&dev->napi_list);
6584 INIT_LIST_HEAD(&dev->unreg_list);
6585 INIT_LIST_HEAD(&dev->close_list);
6586 INIT_LIST_HEAD(&dev->link_watch_list);
6587 INIT_LIST_HEAD(&dev->adj_list.upper);
6588 INIT_LIST_HEAD(&dev->adj_list.lower);
6589 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6590 INIT_LIST_HEAD(&dev->all_adj_list.lower);
6591 dev->priv_flags = IFF_XMIT_DST_RELEASE;
6594 dev->num_tx_queues = txqs;
6595 dev->real_num_tx_queues = txqs;
6596 if (netif_alloc_netdev_queues(dev))
6600 dev->num_rx_queues = rxqs;
6601 dev->real_num_rx_queues = rxqs;
6602 if (netif_alloc_rx_queues(dev))
6606 strcpy(dev->name, name);
6607 dev->name_assign_type = name_assign_type;
6608 dev->group = INIT_NETDEV_GROUP;
6609 if (!dev->ethtool_ops)
6610 dev->ethtool_ops = &default_ethtool_ops;
6618 free_percpu(dev->pcpu_refcnt);
6620 netdev_freemem(dev);
6623 EXPORT_SYMBOL(alloc_netdev_mqs);
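/* Usage sketch (illustrative only): an Ethernet-like driver asking for
 * eight TX and eight RX queues; ether_setup() fills in the common
 * Ethernet fields and struct my_priv is hypothetical.
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "eth%d",
 *			       NET_NAME_UNKNOWN, ether_setup, 8, 8);
 *	if (!dev)
 *		return -ENOMEM;
 */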
6626 * free_netdev - free network device
6629 * This function does the last stage of destroying an allocated device
6630 * interface. The reference to the device object is released.
6631 * If this is the last reference then it will be freed.
6633 void free_netdev(struct net_device *dev)
6635 struct napi_struct *p, *n;
6637 release_net(dev_net(dev));
6639 netif_free_tx_queues(dev);
6644 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6646 /* Flush device addresses */
6647 dev_addr_flush(dev);
6649 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6652 free_percpu(dev->pcpu_refcnt);
6653 dev->pcpu_refcnt = NULL;
6655 /* Compatibility with error handling in drivers */
6656 if (dev->reg_state == NETREG_UNINITIALIZED) {
6657 netdev_freemem(dev);
6661 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6662 dev->reg_state = NETREG_RELEASED;
6664 /* will free via device release */
6665 put_device(&dev->dev);
6667 EXPORT_SYMBOL(free_netdev);
6670 * synchronize_net - Synchronize with packet receive processing
6672 * Wait for packets currently being received to be done.
6673 * Does not block later packets from starting.
6675 void synchronize_net(void)
6678 if (rtnl_is_locked())
6679 synchronize_rcu_expedited();
6683 EXPORT_SYMBOL(synchronize_net);
6686 * unregister_netdevice_queue - remove device from the kernel
6690 * This function shuts down a device interface and removes it
6691 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
6694 * Callers must hold the rtnl semaphore. You may want
6695 * unregister_netdev() instead of this.
6698 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6703 list_move_tail(&dev->unreg_list, head);
6705 rollback_registered(dev);
6706 /* Finish processing unregister after unlock */
6710 EXPORT_SYMBOL(unregister_netdevice_queue);
6713 * unregister_netdevice_many - unregister many devices
6714 * @head: list of devices
 * Note: As most callers use a stack-allocated list_head,
 * we force a list_del() to make sure the stack won't be corrupted later.
6719 void unregister_netdevice_many(struct list_head *head)
6721 struct net_device *dev;
6723 if (!list_empty(head)) {
6724 rollback_registered_many(head);
6725 list_for_each_entry(dev, head, unreg_list)
6730 EXPORT_SYMBOL(unregister_netdevice_many);
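/* Usage sketch (illustrative only): batching several unregistrations
 * so the expensive synchronization points are paid once; the list head
 * lives on the caller's stack, which is why the list_del() above
 * matters.
 *
 *	LIST_HEAD(kill_list);
 *
 *	ASSERT_RTNL();
 *	unregister_netdevice_queue(dev1, &kill_list);
 *	unregister_netdevice_queue(dev2, &kill_list);
 *	unregister_netdevice_many(&kill_list);
 */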
6733 * unregister_netdev - remove device from the kernel
6736 * This function shuts down a device interface and removes it
6737 * from the kernel tables.
6739 * This is just a wrapper for unregister_netdevice that takes
6740 * the rtnl semaphore. In general you want to use this and not
6741 * unregister_netdevice.
6743 void unregister_netdev(struct net_device *dev)
6746 unregister_netdevice(dev);
6749 EXPORT_SYMBOL(unregister_netdev);
 * dev_change_net_namespace - move device to a different network namespace
6754 * @net: network namespace
 * @pat: If not NULL, name pattern to try if the current device name
6756 * is already taken in the destination network namespace.
6758 * This function shuts down a device interface and moves it
6759 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
6762 * Callers must hold the rtnl semaphore.
6765 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6771 /* Don't allow namespace local devices to be moved. */
6773 if (dev->features & NETIF_F_NETNS_LOCAL)
	/* Ensure the device has been registered */
6777 if (dev->reg_state != NETREG_REGISTERED)
	/* Get out if there is nothing to do */
6782 if (net_eq(dev_net(dev), net))
6785 /* Pick the destination device name, and ensure
6786 * we can use it in the destination network namespace.
6789 if (__dev_get_by_name(net, dev->name)) {
6790 /* We get here if we can't use the current device name */
6793 if (dev_get_valid_name(net, dev, pat) < 0)
	 * And now a mini version of register_netdevice() and unregister_netdevice().
	/* If device is running, close it first. */
6804 /* And unlink it from device chain */
6806 unlist_netdevice(dev);
6810 /* Shutdown queueing discipline. */
	/* Notify protocols that we are about to destroy this device.
	   They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
6820 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6822 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6823 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
6826 * Flush the unicast and multicast chains
6831 /* Send a netdev-removed uevent to the old namespace */
6832 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6833 netdev_adjacent_del_links(dev);
6835 /* Actually switch the network namespace */
6836 dev_net_set(dev, net);
6838 /* If there is an ifindex conflict assign a new one */
6839 if (__dev_get_by_index(net, dev->ifindex)) {
6840 int iflink = (dev->iflink == dev->ifindex);
6841 dev->ifindex = dev_new_index(net);
6843 dev->iflink = dev->ifindex;
6846 /* Send a netdev-add uevent to the new namespace */
6847 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6848 netdev_adjacent_add_links(dev);
6850 /* Fixup kobjects */
6851 err = device_rename(&dev->dev, dev->name);
6854 /* Add the device back in the hashes */
6855 list_netdevice(dev);
6857 /* Notify protocols, that a new device appeared. */
6858 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6861 * Prevent userspace races by waiting until the network
	 * device is fully set up before sending notifications.
6864 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6871 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
6873 static int dev_cpu_callback(struct notifier_block *nfb,
6874 unsigned long action,
6877 struct sk_buff **list_skb;
6878 struct sk_buff *skb;
6879 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6880 struct softnet_data *sd, *oldsd;
6882 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6885 local_irq_disable();
6886 cpu = smp_processor_id();
6887 sd = &per_cpu(softnet_data, cpu);
6888 oldsd = &per_cpu(softnet_data, oldcpu);
6890 /* Find end of our completion_queue. */
6891 list_skb = &sd->completion_queue;
6893 list_skb = &(*list_skb)->next;
6894 /* Append completion queue from offline CPU. */
6895 *list_skb = oldsd->completion_queue;
6896 oldsd->completion_queue = NULL;
6898 /* Append output queue from offline CPU. */
6899 if (oldsd->output_queue) {
6900 *sd->output_queue_tailp = oldsd->output_queue;
6901 sd->output_queue_tailp = oldsd->output_queue_tailp;
6902 oldsd->output_queue = NULL;
6903 oldsd->output_queue_tailp = &oldsd->output_queue;
6905 /* Append NAPI poll list from offline CPU. */
6906 if (!list_empty(&oldsd->poll_list)) {
6907 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6908 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6911 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6914 /* Process offline CPU's input_pkt_queue */
6915 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6916 netif_rx_internal(skb);
6917 input_queue_head_incr(oldsd);
6919 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6920 netif_rx_internal(skb);
6921 input_queue_head_incr(oldsd);
6929 * netdev_increment_features - increment feature set by one
6930 * @all: current feature set
6931 * @one: new feature set
6932 * @mask: mask feature set
6934 * Computes a new feature set after adding a device with feature set
6935 * @one to the master device with current feature set @all. Will not
6936 * enable anything that is off in @mask. Returns the new feature set.
6938 netdev_features_t netdev_increment_features(netdev_features_t all,
6939 netdev_features_t one, netdev_features_t mask)
6941 if (mask & NETIF_F_GEN_CSUM)
6942 mask |= NETIF_F_ALL_CSUM;
6943 mask |= NETIF_F_VLAN_CHALLENGED;
6945 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6946 all &= one | ~NETIF_F_ALL_FOR_ALL;
6948 /* If one device supports hw checksumming, set for all. */
6949 if (all & NETIF_F_GEN_CSUM)
6950 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6954 EXPORT_SYMBOL(netdev_increment_features);
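/* Usage sketch (illustrative only): a bonding-style master folding in
 * each slave's feature set; "master", "slave" and the slave_list are
 * hypothetical, and "mask" limits what may ever be turned on.
 *
 *	netdev_features_t features = mask;
 *
 *	list_for_each_entry(slave, &master->slave_list, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */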
6956 static struct hlist_head * __net_init netdev_create_hash(void)
6959 struct hlist_head *hash;
6961 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6963 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6964 INIT_HLIST_HEAD(&hash[i]);
6969 /* Initialize per network namespace state */
6970 static int __net_init netdev_init(struct net *net)
6972 if (net != &init_net)
6973 INIT_LIST_HEAD(&net->dev_base_head);
6975 net->dev_name_head = netdev_create_hash();
6976 if (net->dev_name_head == NULL)
6979 net->dev_index_head = netdev_create_hash();
6980 if (net->dev_index_head == NULL)
6986 kfree(net->dev_name_head);
6992 * netdev_drivername - network driver for the device
6993 * @dev: network device
6995 * Determine network driver for device.
6997 const char *netdev_drivername(const struct net_device *dev)
6999 const struct device_driver *driver;
7000 const struct device *parent;
7001 const char *empty = "";
7003 parent = dev->dev.parent;
7007 driver = parent->driver;
7008 if (driver && driver->name)
7009 return driver->name;
7013 static int __netdev_printk(const char *level, const struct net_device *dev,
7014 struct va_format *vaf)
7018 if (dev && dev->dev.parent) {
7019 r = dev_printk_emit(level[1] - '0',
7022 dev_driver_string(dev->dev.parent),
7023 dev_name(dev->dev.parent),
7024 netdev_name(dev), netdev_reg_state(dev),
7027 r = printk("%s%s%s: %pV", level, netdev_name(dev),
7028 netdev_reg_state(dev), vaf);
7030 r = printk("%s(NULL net_device): %pV", level, vaf);
7036 int netdev_printk(const char *level, const struct net_device *dev,
7037 const char *format, ...)
7039 struct va_format vaf;
7043 va_start(args, format);
7048 r = __netdev_printk(level, dev, &vaf);
7054 EXPORT_SYMBOL(netdev_printk);
7056 #define define_netdev_printk_level(func, level) \
7057 int func(const struct net_device *dev, const char *fmt, ...) \
7060 struct va_format vaf; \
7063 va_start(args, fmt); \
7068 r = __netdev_printk(level, dev, &vaf); \
7074 EXPORT_SYMBOL(func);
7076 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7077 define_netdev_printk_level(netdev_alert, KERN_ALERT);
7078 define_netdev_printk_level(netdev_crit, KERN_CRIT);
7079 define_netdev_printk_level(netdev_err, KERN_ERR);
7080 define_netdev_printk_level(netdev_warn, KERN_WARNING);
7081 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7082 define_netdev_printk_level(netdev_info, KERN_INFO);
7084 static void __net_exit netdev_exit(struct net *net)
7086 kfree(net->dev_name_head);
7087 kfree(net->dev_index_head);
7090 static struct pernet_operations __net_initdata netdev_net_ops = {
7091 .init = netdev_init,
7092 .exit = netdev_exit,
7095 static void __net_exit default_device_exit(struct net *net)
7097 struct net_device *dev, *aux;
7099 * Push all migratable network devices back to the
7100 * initial network namespace
7103 for_each_netdev_safe(net, dev, aux) {
7105 char fb_name[IFNAMSIZ];
		/* Ignore unmovable devices (e.g. loopback) */
7108 if (dev->features & NETIF_F_NETNS_LOCAL)
7111 /* Leave virtual devices for the generic cleanup */
7112 if (dev->rtnl_link_ops)
7115 /* Push remaining network devices to init_net */
7116 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7117 err = dev_change_net_namespace(dev, &init_net, fb_name);
7119 pr_emerg("%s: failed to move %s to init_net: %d\n",
7120 __func__, dev->name, err);
7127 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7129 /* Return with the rtnl_lock held when there are no network
7130 * devices unregistering in any network namespace in net_list.
7137 prepare_to_wait(&netdev_unregistering_wq, &wait,
7138 TASK_UNINTERRUPTIBLE);
7139 unregistering = false;
7141 list_for_each_entry(net, net_list, exit_list) {
7142 if (net->dev_unreg_count > 0) {
7143 unregistering = true;
7152 finish_wait(&netdev_unregistering_wq, &wait);
7155 static void __net_exit default_device_exit_batch(struct list_head *net_list)
	/* At exit all network devices must be removed from a network
7158 * namespace. Do this in the reverse order of registration.
7159 * Do this across as many network namespaces as possible to
7160 * improve batching efficiency.
7162 struct net_device *dev;
7164 LIST_HEAD(dev_kill_list);
	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
7172 * The netdev todo list containing all network devices
7173 * unregistrations that happen in default_device_exit_batch
7174 * will run in the rtnl_unlock() at the end of
7175 * default_device_exit_batch.
7177 rtnl_lock_unregistering(net_list);
7178 list_for_each_entry(net, net_list, exit_list) {
7179 for_each_netdev_reverse(net, dev) {
7180 if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
7181 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7183 unregister_netdevice_queue(dev, &dev_kill_list);
7186 unregister_netdevice_many(&dev_kill_list);
7190 static struct pernet_operations __net_initdata default_device_ops = {
7191 .exit = default_device_exit,
7192 .exit_batch = default_device_exit_batch,
7196 * Initialize the DEV module. At boot time this walks the device list and
7197 * unhooks any devices that fail to initialise (normally hardware not
7198 * present) and leaves us with a valid list of present and active devices.
7203 * This is called single threaded during boot, so no need
7204 * to take the rtnl semaphore.
7206 static int __init net_dev_init(void)
7208 int i, rc = -ENOMEM;
7210 BUG_ON(!dev_boot_phase);
7212 if (dev_proc_init())
7215 if (netdev_kobject_init())
7218 INIT_LIST_HEAD(&ptype_all);
7219 for (i = 0; i < PTYPE_HASH_SIZE; i++)
7220 INIT_LIST_HEAD(&ptype_base[i]);
7222 INIT_LIST_HEAD(&offload_base);
7224 if (register_pernet_subsys(&netdev_net_ops))
7228 * Initialise the packet receive queues.
7231 for_each_possible_cpu(i) {
7232 struct softnet_data *sd = &per_cpu(softnet_data, i);
7234 skb_queue_head_init(&sd->input_pkt_queue);
7235 skb_queue_head_init(&sd->process_queue);
7236 INIT_LIST_HEAD(&sd->poll_list);
7237 sd->output_queue_tailp = &sd->output_queue;
7239 sd->csd.func = rps_trigger_softirq;
7244 sd->backlog.poll = process_backlog;
7245 sd->backlog.weight = weight_p;
	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so that it is the first device that
	 * appears and the last network device to disappear.
7259 if (register_pernet_device(&loopback_net_ops))
7262 if (register_pernet_device(&default_device_ops))
7265 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7266 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
7268 hotcpu_notifier(dev_cpu_callback, 0);
7275 subsys_initcall(net_dev_init);