net/core/dev.c
1 /*
2  *      NET3    Protocol independent device support routines.
3  *
4  *              This program is free software; you can redistribute it and/or
5  *              modify it under the terms of the GNU General Public License
6  *              as published by the Free Software Foundation; either version
7  *              2 of the License, or (at your option) any later version.
8  *
9  *      Derived from the non IP parts of dev.c 1.0.19
10  *              Authors:        Ross Biro
11  *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *      Additional Authors:
15  *              Florian la Roche <rzsfl@rz.uni-sb.de>
16  *              Alan Cox <gw4pts@gw4pts.ampr.org>
17  *              David Hinds <dahinds@users.sourceforge.net>
18  *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *              Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *      Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *                                      to 2 if register_netdev gets called
25  *                                      before net_dev_init & also removed a
26  *                                      few lines of code in the process.
27  *              Alan Cox        :       device private ioctl copies fields back.
28  *              Alan Cox        :       Transmit queue code does relevant
29  *                                      stunts to keep the queue safe.
30  *              Alan Cox        :       Fixed double lock.
31  *              Alan Cox        :       Fixed promisc NULL pointer trap
32  *              ????????        :       Support the full private ioctl range
33  *              Alan Cox        :       Moved ioctl permission check into
34  *                                      drivers
35  *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
36  *              Alan Cox        :       100 backlog just doesn't cut it when
37  *                                      you start doing multicast video 8)
38  *              Alan Cox        :       Rewrote net_bh and list manager.
39  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
40  *              Alan Cox        :       Took out transmit every packet pass
41  *                                      Saved a few bytes in the ioctl handler
42  *              Alan Cox        :       Network driver sets packet type before
43  *                                      calling netif_rx. Saves a function
44  *                                      call a packet.
45  *              Alan Cox        :       Hashed net_bh()
46  *              Richard Kooijman:       Timestamp fixes.
47  *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
48  *              Alan Cox        :       Device lock protection.
49  *              Alan Cox        :       Fixed nasty side effect of device close
50  *                                      changes.
51  *              Rudi Cilibrasi  :       Pass the right thing to
52  *                                      set_mac_address()
53  *              Dave Miller     :       32bit quantity for the device lock to
54  *                                      make it work out on a Sparc.
55  *              Bjorn Ekwall    :       Added KERNELD hack.
56  *              Alan Cox        :       Cleaned up the backlog initialise.
57  *              Craig Metz      :       SIOCGIFCONF fix if space for under
58  *                                      1 device.
59  *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
60  *                                      is no device open function.
61  *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
62  *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
63  *              Cyrus Durgin    :       Cleaned for KMOD
64  *              Adam Sulmicki   :       Bug Fix : Network Device Unload
65  *                                      A network device unload needs to purge
66  *                                      the backlog queue.
67  *      Paul Rusty Russell      :       SIOCSIFNAME
68  *              Pekka Riikonen  :       Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *                                      indefinitely on dev->refcnt
71  *              J Hadi Salim    :       - Backlog queue sampling
72  *                                      - netif_rx() feedback
73  */
74
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/stat.h>
101 #include <net/dst.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/module.h>
108 #include <linux/netpoll.h>
109 #include <linux/rcupdate.h>
110 #include <linux/delay.h>
111 #include <net/iw_handler.h>
112 #include <asm/current.h>
113 #include <linux/audit.h>
114 #include <linux/dmaengine.h>
115 #include <linux/err.h>
116 #include <linux/ctype.h>
117 #include <linux/if_arp.h>
118 #include <linux/if_vlan.h>
119 #include <linux/ip.h>
120 #include <net/ip.h>
121 #include <linux/ipv6.h>
122 #include <linux/in.h>
123 #include <linux/jhash.h>
124 #include <linux/random.h>
125 #include <trace/events/napi.h>
126 #include <trace/events/net.h>
127 #include <trace/events/skb.h>
128 #include <linux/pci.h>
129 #include <linux/inetdevice.h>
130 #include <linux/cpu_rmap.h>
131 #include <linux/static_key.h>
132 #include <linux/hashtable.h>
133 #include <linux/vmalloc.h>
134 #include <linux/if_macvlan.h>
135 #include <linux/errqueue.h>
136
137 #include "net-sysfs.h"
138
139 /* Instead of increasing this, you should create a hash table. */
140 #define MAX_GRO_SKBS 8
141
142 /* This should be increased if a protocol with a bigger head is added. */
143 #define GRO_MAX_HEAD (MAX_HEADER + 128)
144
145 static DEFINE_SPINLOCK(ptype_lock);
146 static DEFINE_SPINLOCK(offload_lock);
147 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
148 struct list_head ptype_all __read_mostly;       /* Taps */
149 static struct list_head offload_base __read_mostly;
150
151 static int netif_rx_internal(struct sk_buff *skb);
152 static int call_netdevice_notifiers_info(unsigned long val,
153                                          struct net_device *dev,
154                                          struct netdev_notifier_info *info);
155
156 /*
157  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
158  * semaphore.
159  *
160  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
161  *
162  * Writers must hold the rtnl semaphore while they loop through the
163  * dev_base_head list, and hold dev_base_lock for writing when they do the
164  * actual updates.  This allows pure readers to access the list even
165  * while a writer is preparing to update it.
166  *
167  * To put it another way, dev_base_lock is held for writing only to
168  * protect against pure readers; the rtnl semaphore provides the
169  * protection against other writers.
170  *
171  * See, for example usages, register_netdevice() and
172  * unregister_netdevice(), which must be called with the rtnl
173  * semaphore held.
174  */
175 DEFINE_RWLOCK(dev_base_lock);
176 EXPORT_SYMBOL(dev_base_lock);
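
/*
 * Illustrative sketch (not part of the original file): the "pure reader"
 * pattern described above.  An RCU reader wraps the walk in
 * rcu_read_lock()/rcu_read_unlock(); a non-RCU reader would take
 * dev_base_lock for reading instead.  Writers additionally hold the
 * RTNL semaphore (see list_netdevice()/unlist_netdevice() below).
 */
static int __maybe_unused example_count_netdevs(struct net *net)
{
        struct net_device *dev;
        int count = 0;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                count++;
        rcu_read_unlock();

        return count;
}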
177
178 /* protects napi_hash addition/deletion and napi_gen_id */
179 static DEFINE_SPINLOCK(napi_hash_lock);
180
181 static unsigned int napi_gen_id;
182 static DEFINE_HASHTABLE(napi_hash, 8);
183
184 static seqcount_t devnet_rename_seq;
185
186 static inline void dev_base_seq_inc(struct net *net)
187 {
188         while (++net->dev_base_seq == 0);
189 }
190
191 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
192 {
193         unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
194
195         return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
196 }
197
198 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
199 {
200         return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
201 }
202
203 static inline void rps_lock(struct softnet_data *sd)
204 {
205 #ifdef CONFIG_RPS
206         spin_lock(&sd->input_pkt_queue.lock);
207 #endif
208 }
209
210 static inline void rps_unlock(struct softnet_data *sd)
211 {
212 #ifdef CONFIG_RPS
213         spin_unlock(&sd->input_pkt_queue.lock);
214 #endif
215 }
216
217 /* Device list insertion */
218 static void list_netdevice(struct net_device *dev)
219 {
220         struct net *net = dev_net(dev);
221
222         ASSERT_RTNL();
223
224         write_lock_bh(&dev_base_lock);
225         list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
226         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
227         hlist_add_head_rcu(&dev->index_hlist,
228                            dev_index_hash(net, dev->ifindex));
229         write_unlock_bh(&dev_base_lock);
230
231         dev_base_seq_inc(net);
232 }
233
234 /* Device list removal
235  * caller must respect an RCU grace period before freeing/reusing dev
236  */
237 static void unlist_netdevice(struct net_device *dev)
238 {
239         ASSERT_RTNL();
240
241         /* Unlink dev from the device chain */
242         write_lock_bh(&dev_base_lock);
243         list_del_rcu(&dev->dev_list);
244         hlist_del_rcu(&dev->name_hlist);
245         hlist_del_rcu(&dev->index_hlist);
246         write_unlock_bh(&dev_base_lock);
247
248         dev_base_seq_inc(dev_net(dev));
249 }
250
251 /*
252  *      Our notifier list
253  */
254
255 static RAW_NOTIFIER_HEAD(netdev_chain);
256
257 /*
258  *      Device drivers call our routines to queue packets here. We empty the
259  *      queue in the local softnet handler.
260  */
261
262 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
263 EXPORT_PER_CPU_SYMBOL(softnet_data);
264
265 #ifdef CONFIG_LOCKDEP
266 /*
267  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
268  * according to dev->type
269  */
270 static const unsigned short netdev_lock_type[] =
271         {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
272          ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
273          ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
274          ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
275          ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
276          ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
277          ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
278          ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
279          ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
280          ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
281          ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
282          ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
283          ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
284          ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
285          ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
286
287 static const char *const netdev_lock_name[] =
288         {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
289          "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
290          "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
291          "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
292          "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
293          "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
294          "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
295          "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
296          "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
297          "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
298          "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
299          "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
300          "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
301          "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
302          "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
303
304 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
305 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
306
307 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
308 {
309         int i;
310
311         for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
312                 if (netdev_lock_type[i] == dev_type)
313                         return i;
314         /* the last key is used by default */
315         return ARRAY_SIZE(netdev_lock_type) - 1;
316 }
317
318 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
319                                                  unsigned short dev_type)
320 {
321         int i;
322
323         i = netdev_lock_pos(dev_type);
324         lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
325                                    netdev_lock_name[i]);
326 }
327
328 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
329 {
330         int i;
331
332         i = netdev_lock_pos(dev->type);
333         lockdep_set_class_and_name(&dev->addr_list_lock,
334                                    &netdev_addr_lock_key[i],
335                                    netdev_lock_name[i]);
336 }
337 #else
338 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
339                                                  unsigned short dev_type)
340 {
341 }
342 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
343 {
344 }
345 #endif
346
347 /*******************************************************************************
348
349                 Protocol management and registration routines
350
351 *******************************************************************************/
352
353 /*
354  *      Add a protocol ID to the list. Now that the input handler is
355  *      smarter we can dispense with all the messy stuff that used to be
356  *      here.
357  *
358  *      BEWARE!!! Protocol handlers that mangle input packets
359  *      MUST BE last in the hash buckets, and the walk over protocol
360  *      handlers MUST start from the promiscuous ptype_all chain in
361  *      net_bh.  This holds today; do not change it.
362  *      Explanation: if a packet-mangling handler were first in the
363  *      list, it could not tell that the packet is cloned and must be
364  *      copied-on-write before being changed, so it would modify the
365  *      shared data and subsequent readers would see a broken packet.
366  *                                                      --ANK (980803)
367  */
368
369 static inline struct list_head *ptype_head(const struct packet_type *pt)
370 {
371         if (pt->type == htons(ETH_P_ALL))
372                 return &ptype_all;
373         else
374                 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
375 }
376
377 /**
378  *      dev_add_pack - add packet handler
379  *      @pt: packet type declaration
380  *
381  *      Add a protocol handler to the networking stack. The passed &packet_type
382  *      is linked into kernel lists and may not be freed until it has been
383  *      removed from the kernel lists.
384  *
385  *      This call does not sleep, therefore it cannot guarantee that
386  *      CPUs that are in the middle of receiving packets will see the
387  *      new packet type (until the next received packet).
388  */
389
390 void dev_add_pack(struct packet_type *pt)
391 {
392         struct list_head *head = ptype_head(pt);
393
394         spin_lock(&ptype_lock);
395         list_add_rcu(&pt->list, head);
396         spin_unlock(&ptype_lock);
397 }
398 EXPORT_SYMBOL(dev_add_pack);
399
400 /**
401  *      __dev_remove_pack        - remove packet handler
402  *      @pt: packet type declaration
403  *
404  *      Remove a protocol handler that was previously added to the kernel
405  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
406  *      from the kernel lists and can be freed or reused once this function
407  *      returns.
408  *
409  *      The packet type might still be in use by receivers
410  *      and must not be freed until after all the CPUs have gone
411  *      through a quiescent state.
412  */
413 void __dev_remove_pack(struct packet_type *pt)
414 {
415         struct list_head *head = ptype_head(pt);
416         struct packet_type *pt1;
417
418         spin_lock(&ptype_lock);
419
420         list_for_each_entry(pt1, head, list) {
421                 if (pt == pt1) {
422                         list_del_rcu(&pt->list);
423                         goto out;
424                 }
425         }
426
427         pr_warn("dev_remove_pack: %p not found\n", pt);
428 out:
429         spin_unlock(&ptype_lock);
430 }
431 EXPORT_SYMBOL(__dev_remove_pack);
432
433 /**
434  *      dev_remove_pack  - remove packet handler
435  *      @pt: packet type declaration
436  *
437  *      Remove a protocol handler that was previously added to the kernel
438  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
439  *      from the kernel lists and can be freed or reused once this function
440  *      returns.
441  *
442  *      This call sleeps to guarantee that no CPU is looking at the packet
443  *      type after return.
444  */
445 void dev_remove_pack(struct packet_type *pt)
446 {
447         __dev_remove_pack(pt);
448
449         synchronize_net();
450 }
451 EXPORT_SYMBOL(dev_remove_pack);
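
/*
 * Illustrative sketch (not part of the original file): a minimal user of
 * dev_add_pack()/dev_remove_pack().  The handler and its name are
 * hypothetical; a real handler must also honour the shared-skb rules in
 * the --ANK comment above.
 */
static int example_pkt_rcv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *pt, struct net_device *orig_dev)
{
        /* A tap gets its own reference on the skb and must release it. */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pkt __maybe_unused = {
        .type = cpu_to_be16(ETH_P_ALL), /* tap: see every protocol */
        .func = example_pkt_rcv,
};

/* Paired calls: dev_add_pack(&example_pkt); ... dev_remove_pack(&example_pkt); */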
452
453
454 /**
455  *      dev_add_offload - register offload handlers
456  *      @po: protocol offload declaration
457  *
458  *      Add protocol offload handlers to the networking stack. The passed
459  *      &packet_offload is linked into kernel lists and may not be freed until
460  *      it has been removed from the kernel lists.
461  *
462  *      This call does not sleep, therefore it cannot guarantee that
463  *      CPUs that are in the middle of receiving packets will see the
464  *      new offload handlers (until the next received packet).
465  */
466 void dev_add_offload(struct packet_offload *po)
467 {
468         struct list_head *head = &offload_base;
469
470         spin_lock(&offload_lock);
471         list_add_rcu(&po->list, head);
472         spin_unlock(&offload_lock);
473 }
474 EXPORT_SYMBOL(dev_add_offload);
475
476 /**
477  *      __dev_remove_offload     - remove offload handler
478  *      @po: packet offload declaration
479  *
480  *      Remove a protocol offload handler that was previously added to the
481  *      kernel offload handlers by dev_add_offload(). The passed &packet_offload
482  *      is removed from the kernel lists and can be freed or reused once this
483  *      function returns.
484  *
485  *      The packet type might still be in use by receivers
486  *      and must not be freed until after all the CPUs have gone
487  *      through a quiescent state.
488  */
489 static void __dev_remove_offload(struct packet_offload *po)
490 {
491         struct list_head *head = &offload_base;
492         struct packet_offload *po1;
493
494         spin_lock(&offload_lock);
495
496         list_for_each_entry(po1, head, list) {
497                 if (po == po1) {
498                         list_del_rcu(&po->list);
499                         goto out;
500                 }
501         }
502
503         pr_warn("dev_remove_offload: %p not found\n", po);
504 out:
505         spin_unlock(&offload_lock);
506 }
507
508 /**
509  *      dev_remove_offload       - remove packet offload handler
510  *      @po: packet offload declaration
511  *
512  *      Remove a packet offload handler that was previously added to the kernel
513  *      offload handlers by dev_add_offload(). The passed &packet_offload is
514  *      removed from the kernel lists and can be freed or reused once this
515  *      function returns.
516  *
517  *      This call sleeps to guarantee that no CPU is looking at the packet
518  *      type after return.
519  */
520 void dev_remove_offload(struct packet_offload *po)
521 {
522         __dev_remove_offload(po);
523
524         synchronize_net();
525 }
526 EXPORT_SYMBOL(dev_remove_offload);
527
528 /******************************************************************************
529
530                       Device Boot-time Settings Routines
531
532 *******************************************************************************/
533
534 /* Boot time configuration table */
535 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
536
537 /**
538  *      netdev_boot_setup_add   - add new setup entry
539  *      @name: name of the device
540  *      @map: configured settings for the device
541  *
542  *      Adds new setup entry to the dev_boot_setup list.  The function
543  *      returns 0 on error and 1 on success.  This is a generic routine to
544  *      all netdevices.
545  */
546 static int netdev_boot_setup_add(char *name, struct ifmap *map)
547 {
548         struct netdev_boot_setup *s;
549         int i;
550
551         s = dev_boot_setup;
552         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
553                 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
554                         memset(s[i].name, 0, sizeof(s[i].name));
555                         strlcpy(s[i].name, name, IFNAMSIZ);
556                         memcpy(&s[i].map, map, sizeof(s[i].map));
557                         break;
558                 }
559         }
560
561         return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
562 }
563
564 /**
565  *      netdev_boot_setup_check - check boot time settings
566  *      @dev: the netdevice
567  *
568  *      Check boot time settings for the device.
569  *      The settings found are applied to the device so they can be used
570  *      later during device probing.
571  *      Returns 0 if no settings were found, 1 if they were.
572  */
573 int netdev_boot_setup_check(struct net_device *dev)
574 {
575         struct netdev_boot_setup *s = dev_boot_setup;
576         int i;
577
578         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
579                 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
580                     !strcmp(dev->name, s[i].name)) {
581                         dev->irq        = s[i].map.irq;
582                         dev->base_addr  = s[i].map.base_addr;
583                         dev->mem_start  = s[i].map.mem_start;
584                         dev->mem_end    = s[i].map.mem_end;
585                         return 1;
586                 }
587         }
588         return 0;
589 }
590 EXPORT_SYMBOL(netdev_boot_setup_check);
591
592
593 /**
594  *      netdev_boot_base        - get address from boot time settings
595  *      @prefix: prefix for network device
596  *      @unit: id for network device
597  *
598  *      Check boot time settings for the base address of the device,
599  *      to be used later during device probing.  Returns the configured
600  *      base address, 1 if the device is already registered (do not probe),
601  *      or 0 if no settings were found.
602  */
603 unsigned long netdev_boot_base(const char *prefix, int unit)
604 {
605         const struct netdev_boot_setup *s = dev_boot_setup;
606         char name[IFNAMSIZ];
607         int i;
608
609         sprintf(name, "%s%d", prefix, unit);
610
611         /*
612          * If device already registered then return base of 1
613          * to indicate not to probe for this interface
614          */
615         if (__dev_get_by_name(&init_net, name))
616                 return 1;
617
618         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
619                 if (!strcmp(name, s[i].name))
620                         return s[i].map.base_addr;
621         return 0;
622 }
623
624 /*
625  * Saves the settings configured at boot time for any netdevice.
626  */
627 int __init netdev_boot_setup(char *str)
628 {
629         int ints[5];
630         struct ifmap map;
631
632         str = get_options(str, ARRAY_SIZE(ints), ints);
633         if (!str || !*str)
634                 return 0;
635
636         /* Save settings */
637         memset(&map, 0, sizeof(map));
638         if (ints[0] > 0)
639                 map.irq = ints[1];
640         if (ints[0] > 1)
641                 map.base_addr = ints[2];
642         if (ints[0] > 2)
643                 map.mem_start = ints[3];
644         if (ints[0] > 3)
645                 map.mem_end = ints[4];
646
647         /* Add new entry to the list */
648         return netdev_boot_setup_add(str, &map);
649 }
650
651 __setup("netdev=", netdev_boot_setup);
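
/*
 * Illustrative example (not part of the original file): with the parser
 * above, a command line of
 *
 *      netdev=5,0x300,0,0,eth0
 *
 * records irq 5 and I/O base 0x300 for the (hypothetical) device "eth0";
 * the string left over by get_options() becomes the name passed to
 * netdev_boot_setup_add().
 */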
652
653 /*******************************************************************************
654
655                             Device Interface Subroutines
656
657 *******************************************************************************/
658
659 /**
660  *      __dev_get_by_name       - find a device by its name
661  *      @net: the applicable net namespace
662  *      @name: name to find
663  *
664  *      Find an interface by name. Must be called under RTNL semaphore
665  *      or @dev_base_lock. If the name is found a pointer to the device
666  *      is returned. If the name is not found then %NULL is returned. The
667  *      reference counters are not incremented so the caller must be
668  *      careful with locks.
669  */
670
671 struct net_device *__dev_get_by_name(struct net *net, const char *name)
672 {
673         struct net_device *dev;
674         struct hlist_head *head = dev_name_hash(net, name);
675
676         hlist_for_each_entry(dev, head, name_hlist)
677                 if (!strncmp(dev->name, name, IFNAMSIZ))
678                         return dev;
679
680         return NULL;
681 }
682 EXPORT_SYMBOL(__dev_get_by_name);
683
684 /**
685  *      dev_get_by_name_rcu     - find a device by its name
686  *      @net: the applicable net namespace
687  *      @name: name to find
688  *
689  *      Find an interface by name.
690  *      If the name is found a pointer to the device is returned.
691  *      If the name is not found then %NULL is returned.
692  *      The reference counters are not incremented so the caller must be
693  *      careful with locks. The caller must hold RCU lock.
694  */
695
696 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
697 {
698         struct net_device *dev;
699         struct hlist_head *head = dev_name_hash(net, name);
700
701         hlist_for_each_entry_rcu(dev, head, name_hlist)
702                 if (!strncmp(dev->name, name, IFNAMSIZ))
703                         return dev;
704
705         return NULL;
706 }
707 EXPORT_SYMBOL(dev_get_by_name_rcu);
708
709 /**
710  *      dev_get_by_name         - find a device by its name
711  *      @net: the applicable net namespace
712  *      @name: name to find
713  *
714  *      Find an interface by name. This can be called from any
715  *      context and does its own locking. The returned handle has
716  *      the usage count incremented and the caller must use dev_put() to
717  *      release it when it is no longer needed. %NULL is returned if no
718  *      matching device is found.
719  */
720
721 struct net_device *dev_get_by_name(struct net *net, const char *name)
722 {
723         struct net_device *dev;
724
725         rcu_read_lock();
726         dev = dev_get_by_name_rcu(net, name);
727         if (dev)
728                 dev_hold(dev);
729         rcu_read_unlock();
730         return dev;
731 }
732 EXPORT_SYMBOL(dev_get_by_name);
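
/*
 * Illustrative sketch (not part of the original file): the caller-side
 * pattern for dev_get_by_name().  The interface name is hypothetical.
 */
static void __maybe_unused example_get_put(struct net *net)
{
        struct net_device *dev;

        dev = dev_get_by_name(net, "eth0");
        if (!dev)
                return;
        /* ... use dev; the held reference keeps it from being freed ... */
        dev_put(dev);
}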
733
734 /**
735  *      __dev_get_by_index - find a device by its ifindex
736  *      @net: the applicable net namespace
737  *      @ifindex: index of device
738  *
739  *      Search for an interface by index. Returns a pointer to the device,
740  *      or %NULL if the device is not found. The device has not
741  *      had its reference counter increased so the caller must be careful
742  *      about locking. The caller must hold either the RTNL semaphore
743  *      or @dev_base_lock.
744  */
745
746 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
747 {
748         struct net_device *dev;
749         struct hlist_head *head = dev_index_hash(net, ifindex);
750
751         hlist_for_each_entry(dev, head, index_hlist)
752                 if (dev->ifindex == ifindex)
753                         return dev;
754
755         return NULL;
756 }
757 EXPORT_SYMBOL(__dev_get_by_index);
758
759 /**
760  *      dev_get_by_index_rcu - find a device by its ifindex
761  *      @net: the applicable net namespace
762  *      @ifindex: index of device
763  *
764  *      Search for an interface by index. Returns a pointer to the device,
765  *      or %NULL if the device is not found. The device has not
766  *      had its reference counter increased so the caller must be careful
767  *      about locking. The caller must hold RCU lock.
768  */
769
770 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
771 {
772         struct net_device *dev;
773         struct hlist_head *head = dev_index_hash(net, ifindex);
774
775         hlist_for_each_entry_rcu(dev, head, index_hlist)
776                 if (dev->ifindex == ifindex)
777                         return dev;
778
779         return NULL;
780 }
781 EXPORT_SYMBOL(dev_get_by_index_rcu);
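
/*
 * Illustrative sketch (not part of the original file): an RCU lookup by
 * ifindex.  No reference is taken, so the device may only be touched
 * inside the rcu_read_lock() section (unless dev_hold() is called, as
 * dev_get_by_index() does below).
 */
static int __maybe_unused example_ifindex_to_mtu(struct net *net, int ifindex)
{
        struct net_device *dev;
        int mtu = -ENODEV;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                mtu = dev->mtu;
        rcu_read_unlock();

        return mtu;
}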
782
783
784 /**
785  *      dev_get_by_index - find a device by its ifindex
786  *      @net: the applicable net namespace
787  *      @ifindex: index of device
788  *
789  *      Search for an interface by index. Returns a pointer to the device,
790  *      or NULL if the device is not found. The device returned has
791  *      had a reference added and the pointer is safe until the user calls
792  *      dev_put to indicate they have finished with it.
793  */
794
795 struct net_device *dev_get_by_index(struct net *net, int ifindex)
796 {
797         struct net_device *dev;
798
799         rcu_read_lock();
800         dev = dev_get_by_index_rcu(net, ifindex);
801         if (dev)
802                 dev_hold(dev);
803         rcu_read_unlock();
804         return dev;
805 }
806 EXPORT_SYMBOL(dev_get_by_index);
807
808 /**
809  *      netdev_get_name - get a netdevice name, knowing its ifindex.
810  *      @net: network namespace
811  *      @name: a pointer to the buffer where the name will be stored.
812  *      @ifindex: the ifindex of the interface to get the name from.
813  *
814  *      The use of raw_seqcount_begin() and cond_resched() before
815  *      retrying is required as we want to give the writers a chance
816  *      to complete when CONFIG_PREEMPT is not set.
817  */
818 int netdev_get_name(struct net *net, char *name, int ifindex)
819 {
820         struct net_device *dev;
821         unsigned int seq;
822
823 retry:
824         seq = raw_seqcount_begin(&devnet_rename_seq);
825         rcu_read_lock();
826         dev = dev_get_by_index_rcu(net, ifindex);
827         if (!dev) {
828                 rcu_read_unlock();
829                 return -ENODEV;
830         }
831
832         strcpy(name, dev->name);
833         rcu_read_unlock();
834         if (read_seqcount_retry(&devnet_rename_seq, seq)) {
835                 cond_resched();
836                 goto retry;
837         }
838
839         return 0;
840 }
841
842 /**
843  *      dev_getbyhwaddr_rcu - find a device by its hardware address
844  *      @net: the applicable net namespace
845  *      @type: media type of device
846  *      @ha: hardware address
847  *
848  *      Search for an interface by MAC address. Returns a pointer to the
849  *      device, or NULL if it is not found.
850  *      The caller must hold RCU or RTNL.
851  *      The returned device has not had its ref count increased
852  *      and the caller must therefore be careful about locking.
853  *
854  */
855
856 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
857                                        const char *ha)
858 {
859         struct net_device *dev;
860
861         for_each_netdev_rcu(net, dev)
862                 if (dev->type == type &&
863                     !memcmp(dev->dev_addr, ha, dev->addr_len))
864                         return dev;
865
866         return NULL;
867 }
868 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
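
/*
 * Illustrative sketch (not part of the original file): looking up an
 * Ethernet device by MAC address under RCU.  The address is hypothetical.
 */
static bool __maybe_unused example_mac_is_known(struct net *net)
{
        static const char ha[ETH_ALEN] = { 0x00, 0x16, 0x3e, 0x00, 0x00, 0x01 };
        struct net_device *dev;
        bool known;

        rcu_read_lock();
        dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, ha);
        known = dev != NULL;
        rcu_read_unlock();

        return known;
}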
869
870 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
871 {
872         struct net_device *dev;
873
874         ASSERT_RTNL();
875         for_each_netdev(net, dev)
876                 if (dev->type == type)
877                         return dev;
878
879         return NULL;
880 }
881 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
882
883 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
884 {
885         struct net_device *dev, *ret = NULL;
886
887         rcu_read_lock();
888         for_each_netdev_rcu(net, dev)
889                 if (dev->type == type) {
890                         dev_hold(dev);
891                         ret = dev;
892                         break;
893                 }
894         rcu_read_unlock();
895         return ret;
896 }
897 EXPORT_SYMBOL(dev_getfirstbyhwtype);
898
899 /**
900  *      __dev_get_by_flags - find any device with given flags
901  *      @net: the applicable net namespace
902  *      @if_flags: IFF_* values
903  *      @mask: bitmask of bits in if_flags to check
904  *
905  *      Search for any interface with the given flags. Returns a pointer to
906  *      the device, or NULL if no matching device is found. Must be called inside
907  *      rtnl_lock(), and result refcount is unchanged.
908  */
909
910 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
911                                       unsigned short mask)
912 {
913         struct net_device *dev, *ret;
914
915         ASSERT_RTNL();
916
917         ret = NULL;
918         for_each_netdev(net, dev) {
919                 if (((dev->flags ^ if_flags) & mask) == 0) {
920                         ret = dev;
921                         break;
922                 }
923         }
924         return ret;
925 }
926 EXPORT_SYMBOL(__dev_get_by_flags);
927
928 /**
929  *      dev_valid_name - check if name is okay for network device
930  *      @name: name string
931  *
932  *      Network device names need to be valid file names
933  *      to allow sysfs to work.  We also disallow any kind of
934  *      whitespace.
935  */
936 bool dev_valid_name(const char *name)
937 {
938         if (*name == '\0')
939                 return false;
940         if (strlen(name) >= IFNAMSIZ)
941                 return false;
942         if (!strcmp(name, ".") || !strcmp(name, ".."))
943                 return false;
944
945         while (*name) {
946                 if (*name == '/' || isspace(*name))
947                         return false;
948                 name++;
949         }
950         return true;
951 }
952 EXPORT_SYMBOL(dev_valid_name);
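
/*
 * Illustrative examples (not part of the original file):
 *
 *      dev_valid_name("eth0")    -> true
 *      dev_valid_name("")        -> false (empty)
 *      dev_valid_name(".")       -> false (reserved)
 *      dev_valid_name("my dev")  -> false (whitespace)
 *      dev_valid_name("a/b")     -> false ('/' not allowed)
 *      dev_valid_name("a-name-longer-than-fifteen-chars") -> false (>= IFNAMSIZ)
 */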
953
954 /**
955  *      __dev_alloc_name - allocate a name for a device
956  *      @net: network namespace to allocate the device name in
957  *      @name: name format string
958  *      @buf:  scratch buffer and result name string
959  *
960  *      Passed a format string - eg "lt%d" - it will try to find a suitable
961  *      id. It scans list of devices to build up a free map, then chooses
962  *      the first empty slot. The caller must hold the dev_base or rtnl lock
963  *      while allocating the name and adding the device in order to avoid
964  *      duplicates.
965  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
966  *      Returns the number of the unit assigned or a negative errno code.
967  */
968
969 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
970 {
971         int i = 0;
972         const char *p;
973         const int max_netdevices = 8*PAGE_SIZE;
974         unsigned long *inuse;
975         struct net_device *d;
976
977         p = strnchr(name, IFNAMSIZ-1, '%');
978         if (p) {
979                 /*
980                  * Verify the string as this thing may have come from
981                  * the user.  There must be exactly one "%d" and no other "%"
982                  * characters.
983                  */
984                 if (p[1] != 'd' || strchr(p + 2, '%'))
985                         return -EINVAL;
986
987                 /* Use one page as a bit array of possible slots */
988                 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
989                 if (!inuse)
990                         return -ENOMEM;
991
992                 for_each_netdev(net, d) {
993                         if (!sscanf(d->name, name, &i))
994                                 continue;
995                         if (i < 0 || i >= max_netdevices)
996                                 continue;
997
998                         /*  avoid cases where sscanf is not exact inverse of printf */
999                         snprintf(buf, IFNAMSIZ, name, i);
1000                         if (!strncmp(buf, d->name, IFNAMSIZ))
1001                                 set_bit(i, inuse);
1002                 }
1003
1004                 i = find_first_zero_bit(inuse, max_netdevices);
1005                 free_page((unsigned long) inuse);
1006         }
1007
1008         if (buf != name)
1009                 snprintf(buf, IFNAMSIZ, name, i);
1010         if (!__dev_get_by_name(net, buf))
1011                 return i;
1012
1013         /* It is possible to run out of slots
1014          * when the name is long and there isn't enough space left
1015          * for the digits, or if all bits are used.
1016          */
1017         return -ENFILE;
1018 }
1019
1020 /**
1021  *      dev_alloc_name - allocate a name for a device
1022  *      @dev: device
1023  *      @name: name format string
1024  *
1025  *      Passed a format string - eg "lt%d" - it will try to find a suitable
1026  *      id. It scans list of devices to build up a free map, then chooses
1027  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1028  *      while allocating the name and adding the device in order to avoid
1029  *      duplicates.
1030  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1031  *      Returns the number of the unit assigned or a negative errno code.
1032  */
1033
1034 int dev_alloc_name(struct net_device *dev, const char *name)
1035 {
1036         char buf[IFNAMSIZ];
1037         struct net *net;
1038         int ret;
1039
1040         BUG_ON(!dev_net(dev));
1041         net = dev_net(dev);
1042         ret = __dev_alloc_name(net, name, buf);
1043         if (ret >= 0)
1044                 strlcpy(dev->name, buf, IFNAMSIZ);
1045         return ret;
1046 }
1047 EXPORT_SYMBOL(dev_alloc_name);
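
/*
 * Illustrative sketch (not part of the original file): a driver that wants
 * "foo0", "foo1", ... names would do something like this before
 * register_netdevice(), holding the rtnl lock as described above.  The
 * "foo%d" format string is hypothetical.
 */
static int __maybe_unused example_name_device(struct net_device *dev)
{
        int unit;

        unit = dev_alloc_name(dev, "foo%d");
        if (unit < 0)
                return unit;    /* -EINVAL or -ENFILE */
        /* dev->name is now e.g. "foo0"; unit holds the assigned number */
        return 0;
}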
1048
1049 static int dev_alloc_name_ns(struct net *net,
1050                              struct net_device *dev,
1051                              const char *name)
1052 {
1053         char buf[IFNAMSIZ];
1054         int ret;
1055
1056         ret = __dev_alloc_name(net, name, buf);
1057         if (ret >= 0)
1058                 strlcpy(dev->name, buf, IFNAMSIZ);
1059         return ret;
1060 }
1061
1062 static int dev_get_valid_name(struct net *net,
1063                               struct net_device *dev,
1064                               const char *name)
1065 {
1066         BUG_ON(!net);
1067
1068         if (!dev_valid_name(name))
1069                 return -EINVAL;
1070
1071         if (strchr(name, '%'))
1072                 return dev_alloc_name_ns(net, dev, name);
1073         else if (__dev_get_by_name(net, name))
1074                 return -EEXIST;
1075         else if (dev->name != name)
1076                 strlcpy(dev->name, name, IFNAMSIZ);
1077
1078         return 0;
1079 }
1080
1081 /**
1082  *      dev_change_name - change name of a device
1083  *      @dev: device
1084  *      @newname: name (or format string) must be at least IFNAMSIZ
1085  *
1086  *      Change the name of a device; a format string such as "eth%d"
1087  *      can be passed for wildcarding.
1088  */
1089 int dev_change_name(struct net_device *dev, const char *newname)
1090 {
1091         unsigned char old_assign_type;
1092         char oldname[IFNAMSIZ];
1093         int err = 0;
1094         int ret;
1095         struct net *net;
1096
1097         ASSERT_RTNL();
1098         BUG_ON(!dev_net(dev));
1099
1100         net = dev_net(dev);
1101         if (dev->flags & IFF_UP)
1102                 return -EBUSY;
1103
1104         write_seqcount_begin(&devnet_rename_seq);
1105
1106         if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1107                 write_seqcount_end(&devnet_rename_seq);
1108                 return 0;
1109         }
1110
1111         memcpy(oldname, dev->name, IFNAMSIZ);
1112
1113         err = dev_get_valid_name(net, dev, newname);
1114         if (err < 0) {
1115                 write_seqcount_end(&devnet_rename_seq);
1116                 return err;
1117         }
1118
1119         if (oldname[0] && !strchr(oldname, '%'))
1120                 netdev_info(dev, "renamed from %s\n", oldname);
1121
1122         old_assign_type = dev->name_assign_type;
1123         dev->name_assign_type = NET_NAME_RENAMED;
1124
1125 rollback:
1126         ret = device_rename(&dev->dev, dev->name);
1127         if (ret) {
1128                 memcpy(dev->name, oldname, IFNAMSIZ);
1129                 dev->name_assign_type = old_assign_type;
1130                 write_seqcount_end(&devnet_rename_seq);
1131                 return ret;
1132         }
1133
1134         write_seqcount_end(&devnet_rename_seq);
1135
1136         netdev_adjacent_rename_links(dev, oldname);
1137
1138         write_lock_bh(&dev_base_lock);
1139         hlist_del_rcu(&dev->name_hlist);
1140         write_unlock_bh(&dev_base_lock);
1141
1142         synchronize_rcu();
1143
1144         write_lock_bh(&dev_base_lock);
1145         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1146         write_unlock_bh(&dev_base_lock);
1147
1148         ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1149         ret = notifier_to_errno(ret);
1150
1151         if (ret) {
1152                 /* err >= 0 after dev_alloc_name() or stores the first errno */
1153                 if (err >= 0) {
1154                         err = ret;
1155                         write_seqcount_begin(&devnet_rename_seq);
1156                         memcpy(dev->name, oldname, IFNAMSIZ);
1157                         memcpy(oldname, newname, IFNAMSIZ);
1158                         dev->name_assign_type = old_assign_type;
1159                         old_assign_type = NET_NAME_RENAMED;
1160                         goto rollback;
1161                 } else {
1162                         pr_err("%s: name change rollback failed: %d\n",
1163                                dev->name, ret);
1164                 }
1165         }
1166
1167         return err;
1168 }
1169
1170 /**
1171  *      dev_set_alias - change ifalias of a device
1172  *      @dev: device
1173  *      @alias: name up to IFALIASZ
1174  *      @len: limit of bytes to copy from @alias
1175  *
1176  *      Set the ifalias for a device.
1177  */
1178 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1179 {
1180         char *new_ifalias;
1181
1182         ASSERT_RTNL();
1183
1184         if (len >= IFALIASZ)
1185                 return -EINVAL;
1186
1187         if (!len) {
1188                 kfree(dev->ifalias);
1189                 dev->ifalias = NULL;
1190                 return 0;
1191         }
1192
1193         new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1194         if (!new_ifalias)
1195                 return -ENOMEM;
1196         dev->ifalias = new_ifalias;
1197
1198         strlcpy(dev->ifalias, alias, len+1);
1199         return len;
1200 }
1201
1202
1203 /**
1204  *      netdev_features_change - device changes features
1205  *      @dev: device to cause notification
1206  *
1207  *      Called to indicate a device has changed features.
1208  */
1209 void netdev_features_change(struct net_device *dev)
1210 {
1211         call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1212 }
1213 EXPORT_SYMBOL(netdev_features_change);
1214
1215 /**
1216  *      netdev_state_change - device changes state
1217  *      @dev: device to cause notification
1218  *
1219  *      Called to indicate a device has changed state. This function calls
1220  *      the netdev_chain notifier chain and sends a NEWLINK message
1221  *      to the routing socket.
1222  */
1223 void netdev_state_change(struct net_device *dev)
1224 {
1225         if (dev->flags & IFF_UP) {
1226                 struct netdev_notifier_change_info change_info;
1227
1228                 change_info.flags_changed = 0;
1229                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1230                                               &change_info.info);
1231                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1232         }
1233 }
1234 EXPORT_SYMBOL(netdev_state_change);
1235
1236 /**
1237  *      netdev_notify_peers - notify network peers about existence of @dev
1238  *      @dev: network device
1239  *
1240  * Generate traffic such that interested network peers are aware of
1241  * @dev, such as by generating a gratuitous ARP. This may be used when
1242  * a device wants to inform the rest of the network about some sort of
1243  * reconfiguration such as a failover event or virtual machine
1244  * migration.
1245  */
1246 void netdev_notify_peers(struct net_device *dev)
1247 {
1248         rtnl_lock();
1249         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1250         rtnl_unlock();
1251 }
1252 EXPORT_SYMBOL(netdev_notify_peers);
1253
1254 static int __dev_open(struct net_device *dev)
1255 {
1256         const struct net_device_ops *ops = dev->netdev_ops;
1257         int ret;
1258
1259         ASSERT_RTNL();
1260
1261         if (!netif_device_present(dev))
1262                 return -ENODEV;
1263
1264         /* Block netpoll from trying to do any rx path servicing.
1265          * If we don't do this there is a chance ndo_poll_controller
1266          * or ndo_poll may be running while we open the device
1267          */
1268         netpoll_poll_disable(dev);
1269
1270         ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1271         ret = notifier_to_errno(ret);
1272         if (ret)
1273                 return ret;
1274
1275         set_bit(__LINK_STATE_START, &dev->state);
1276
1277         if (ops->ndo_validate_addr)
1278                 ret = ops->ndo_validate_addr(dev);
1279
1280         if (!ret && ops->ndo_open)
1281                 ret = ops->ndo_open(dev);
1282
1283         netpoll_poll_enable(dev);
1284
1285         if (ret)
1286                 clear_bit(__LINK_STATE_START, &dev->state);
1287         else {
1288                 dev->flags |= IFF_UP;
1289                 dev_set_rx_mode(dev);
1290                 dev_activate(dev);
1291                 add_device_randomness(dev->dev_addr, dev->addr_len);
1292         }
1293
1294         return ret;
1295 }
1296
1297 /**
1298  *      dev_open        - prepare an interface for use.
1299  *      @dev:   device to open
1300  *
1301  *      Takes a device from down to up state. The device's private open
1302  *      function is invoked and then the multicast lists are loaded. Finally
1303  *      the device is moved into the up state and a %NETDEV_UP message is
1304  *      sent to the netdev notifier chain.
1305  *
1306  *      Calling this function on an active interface is a nop. On a failure
1307  *      a negative errno code is returned.
1308  */
1309 int dev_open(struct net_device *dev)
1310 {
1311         int ret;
1312
1313         if (dev->flags & IFF_UP)
1314                 return 0;
1315
1316         ret = __dev_open(dev);
1317         if (ret < 0)
1318                 return ret;
1319
1320         rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1321         call_netdevice_notifiers(NETDEV_UP, dev);
1322
1323         return ret;
1324 }
1325 EXPORT_SYMBOL(dev_open);
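
/*
 * Illustrative sketch (not part of the original file): bringing an
 * interface up from kernel code.  dev_open() must be called under the
 * RTNL semaphore; the interface name comes from the caller.
 */
static int __maybe_unused example_bring_up(struct net *net, const char *ifname)
{
        struct net_device *dev;
        int err = -ENODEV;

        rtnl_lock();
        dev = __dev_get_by_name(net, ifname);
        if (dev)
                err = dev_open(dev);
        rtnl_unlock();

        return err;
}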
1326
1327 static int __dev_close_many(struct list_head *head)
1328 {
1329         struct net_device *dev;
1330
1331         ASSERT_RTNL();
1332         might_sleep();
1333
1334         list_for_each_entry(dev, head, close_list) {
1335                 /* Temporarily disable netpoll until the interface is down */
1336                 netpoll_poll_disable(dev);
1337
1338                 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1339
1340                 clear_bit(__LINK_STATE_START, &dev->state);
1341
1342                 /* Synchronize to the scheduled poll. We cannot touch the poll list;
1343                  * it may even be on a different cpu. So just clear netif_running().
1344                  *
1345                  * dev->stop() will invoke napi_disable() on all of its
1346                  * napi_struct instances on this device.
1347                  */
1348                 smp_mb__after_atomic(); /* Commit netif_running(). */
1349         }
1350
1351         dev_deactivate_many(head);
1352
1353         list_for_each_entry(dev, head, close_list) {
1354                 const struct net_device_ops *ops = dev->netdev_ops;
1355
1356                 /*
1357                  *      Call the device-specific close. This cannot fail and is
1358                  *      only done if the device is UP.
1359                  *
1360                  *      We allow it to be called even after a DETACH hot-plug
1361                  *      event.
1362                  */
1363                 if (ops->ndo_stop)
1364                         ops->ndo_stop(dev);
1365
1366                 dev->flags &= ~IFF_UP;
1367                 netpoll_poll_enable(dev);
1368         }
1369
1370         return 0;
1371 }
1372
1373 static int __dev_close(struct net_device *dev)
1374 {
1375         int retval;
1376         LIST_HEAD(single);
1377
1378         list_add(&dev->close_list, &single);
1379         retval = __dev_close_many(&single);
1380         list_del(&single);
1381
1382         return retval;
1383 }
1384
1385 static int dev_close_many(struct list_head *head)
1386 {
1387         struct net_device *dev, *tmp;
1388
1389         /* Remove the devices that don't need to be closed */
1390         list_for_each_entry_safe(dev, tmp, head, close_list)
1391                 if (!(dev->flags & IFF_UP))
1392                         list_del_init(&dev->close_list);
1393
1394         __dev_close_many(head);
1395
1396         list_for_each_entry_safe(dev, tmp, head, close_list) {
1397                 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1398                 call_netdevice_notifiers(NETDEV_DOWN, dev);
1399                 list_del_init(&dev->close_list);
1400         }
1401
1402         return 0;
1403 }
1404
1405 /**
1406  *      dev_close - shutdown an interface.
1407  *      @dev: device to shutdown
1408  *
1409  *      This function moves an active device into down state. A
1410  *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1411  *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1412  *      chain.
1413  */
1414 int dev_close(struct net_device *dev)
1415 {
1416         if (dev->flags & IFF_UP) {
1417                 LIST_HEAD(single);
1418
1419                 list_add(&dev->close_list, &single);
1420                 dev_close_many(&single);
1421                 list_del(&single);
1422         }
1423         return 0;
1424 }
1425 EXPORT_SYMBOL(dev_close);
1426
1427
1428 /**
1429  *      dev_disable_lro - disable Large Receive Offload on a device
1430  *      @dev: device
1431  *
1432  *      Disable Large Receive Offload (LRO) on a net device.  Must be
1433  *      called under RTNL.  This is needed if received packets may be
1434  *      forwarded to another interface.
1435  */
1436 void dev_disable_lro(struct net_device *dev)
1437 {
1438         /*
1439          * If we're trying to disable LRO on a vlan device,
1440          * use the underlying physical device instead
1441          */
1442         if (is_vlan_dev(dev))
1443                 dev = vlan_dev_real_dev(dev);
1444
1445         /* the same for macvlan devices */
1446         if (netif_is_macvlan(dev))
1447                 dev = macvlan_dev_real_dev(dev);
1448
1449         dev->wanted_features &= ~NETIF_F_LRO;
1450         netdev_update_features(dev);
1451
1452         if (unlikely(dev->features & NETIF_F_LRO))
1453                 netdev_WARN(dev, "failed to disable LRO!\n");
1454 }
1455 EXPORT_SYMBOL(dev_disable_lro);
1456
1457 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1458                                    struct net_device *dev)
1459 {
1460         struct netdev_notifier_info info;
1461
1462         netdev_notifier_info_init(&info, dev);
1463         return nb->notifier_call(nb, val, &info);
1464 }
1465
1466 static int dev_boot_phase = 1;
1467
1468 /**
1469  *      register_netdevice_notifier - register a network notifier block
1470  *      @nb: notifier
1471  *
1472  *      Register a notifier to be called when network device events occur.
1473  *      The notifier passed is linked into the kernel structures and must
1474  *      not be reused until it has been unregistered. A negative errno code
1475  *      is returned on a failure.
1476  *
1477  *      When registered, all registration and up events are replayed
1478  *      to the new notifier to allow it to have a race-free
1479  *      view of the network device list.
1480  */
1481
1482 int register_netdevice_notifier(struct notifier_block *nb)
1483 {
1484         struct net_device *dev;
1485         struct net_device *last;
1486         struct net *net;
1487         int err;
1488
1489         rtnl_lock();
1490         err = raw_notifier_chain_register(&netdev_chain, nb);
1491         if (err)
1492                 goto unlock;
1493         if (dev_boot_phase)
1494                 goto unlock;
1495         for_each_net(net) {
1496                 for_each_netdev(net, dev) {
1497                         err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1498                         err = notifier_to_errno(err);
1499                         if (err)
1500                                 goto rollback;
1501
1502                         if (!(dev->flags & IFF_UP))
1503                                 continue;
1504
1505                         call_netdevice_notifier(nb, NETDEV_UP, dev);
1506                 }
1507         }
1508
1509 unlock:
1510         rtnl_unlock();
1511         return err;
1512
1513 rollback:
1514         last = dev;
1515         for_each_net(net) {
1516                 for_each_netdev(net, dev) {
1517                         if (dev == last)
1518                                 goto outroll;
1519
1520                         if (dev->flags & IFF_UP) {
1521                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1522                                                         dev);
1523                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1524                         }
1525                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1526                 }
1527         }
1528
1529 outroll:
1530         raw_notifier_chain_unregister(&netdev_chain, nb);
1531         goto unlock;
1532 }
1533 EXPORT_SYMBOL(register_netdevice_notifier);
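
/*
 * Illustrative sketch (not part of the original file): a minimal notifier.
 * The callback runs under RTNL; netdev_notifier_info_to_dev() extracts the
 * device from the opaque pointer passed by call_netdevice_notifiers().
 */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_UP:
                netdev_info(dev, "is up\n");
                break;
        case NETDEV_GOING_DOWN:
                netdev_info(dev, "going down\n");
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb __maybe_unused = {
        .notifier_call = example_netdev_event,
};

/* Paired calls: register_netdevice_notifier(&example_netdev_nb); ...
 * unregister_netdevice_notifier(&example_netdev_nb);
 */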
1534
1535 /**
1536  *      unregister_netdevice_notifier - unregister a network notifier block
1537  *      @nb: notifier
1538  *
1539  *      Unregister a notifier previously registered by
1540  *      register_netdevice_notifier(). The notifier is unlinked from the
1541  *      kernel structures and may then be reused. A negative errno code
1542  *      is returned on a failure.
1543  *
1544  *      After unregistering, unregister and down device events are synthesized
1545  *      for all devices on the device list and sent to the removed notifier,
1546  *      removing the need for special-case cleanup code.
1547  */
1548
1549 int unregister_netdevice_notifier(struct notifier_block *nb)
1550 {
1551         struct net_device *dev;
1552         struct net *net;
1553         int err;
1554
1555         rtnl_lock();
1556         err = raw_notifier_chain_unregister(&netdev_chain, nb);
1557         if (err)
1558                 goto unlock;
1559
1560         for_each_net(net) {
1561                 for_each_netdev(net, dev) {
1562                         if (dev->flags & IFF_UP) {
1563                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1564                                                         dev);
1565                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1566                         }
1567                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1568                 }
1569         }
1570 unlock:
1571         rtnl_unlock();
1572         return err;
1573 }
1574 EXPORT_SYMBOL(unregister_netdevice_notifier);
1575
1576 /**
1577  *      call_netdevice_notifiers_info - call all network notifier blocks
1578  *      @val: value passed unmodified to notifier function
1579  *      @dev: net_device pointer passed unmodified to notifier function
1580  *      @info: notifier information data
1581  *
1582  *      Call all network notifier blocks.  Parameters and return value
1583  *      are as for raw_notifier_call_chain().
1584  */
1585
1586 static int call_netdevice_notifiers_info(unsigned long val,
1587                                          struct net_device *dev,
1588                                          struct netdev_notifier_info *info)
1589 {
1590         ASSERT_RTNL();
1591         netdev_notifier_info_init(info, dev);
1592         return raw_notifier_call_chain(&netdev_chain, val, info);
1593 }
1594
1595 /**
1596  *      call_netdevice_notifiers - call all network notifier blocks
1597  *      @val: value passed unmodified to notifier function
1598  *      @dev: net_device pointer passed unmodified to notifier function
1599  *
1600  *      Call all network notifier blocks.  Parameters and return value
1601  *      are as for raw_notifier_call_chain().
1602  */
1603
1604 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1605 {
1606         struct netdev_notifier_info info;
1607
1608         return call_netdevice_notifiers_info(val, dev, &info);
1609 }
1610 EXPORT_SYMBOL(call_netdevice_notifiers);
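
/* Example (editor's sketch): how an event is emitted to every registered
 * notifier.  This mirrors what netdev_features_change() does; the wrapper
 * name example_features_changed is hypothetical.
 */
static void example_features_changed(struct net_device *dev)
{
	ASSERT_RTNL();
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}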
1611
1612 static struct static_key netstamp_needed __read_mostly;
1613 #ifdef HAVE_JUMP_LABEL
1614 /* We are not allowed to call static_key_slow_dec() from irq context
1615  * If net_disable_timestamp() is called from irq context, defer the
1616  * static_key_slow_dec() calls.
1617  */
1618 static atomic_t netstamp_needed_deferred;
1619 #endif
1620
1621 void net_enable_timestamp(void)
1622 {
1623 #ifdef HAVE_JUMP_LABEL
1624         int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1625
1626         if (deferred) {
1627                 while (--deferred)
1628                         static_key_slow_dec(&netstamp_needed);
1629                 return;
1630         }
1631 #endif
1632         static_key_slow_inc(&netstamp_needed);
1633 }
1634 EXPORT_SYMBOL(net_enable_timestamp);
1635
1636 void net_disable_timestamp(void)
1637 {
1638 #ifdef HAVE_JUMP_LABEL
1639         if (in_interrupt()) {
1640                 atomic_inc(&netstamp_needed_deferred);
1641                 return;
1642         }
1643 #endif
1644         static_key_slow_dec(&netstamp_needed);
1645 }
1646 EXPORT_SYMBOL(net_disable_timestamp);
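
/* Example (editor's sketch): keeping the enable/disable calls balanced from a
 * hypothetical feature toggle.  The static_key behind netstamp_needed is
 * reference counted, so every enable must eventually be paired with a disable.
 */
static bool example_tstamp_enabled;	/* hypothetical state */

static void example_set_rx_tstamp(bool on)
{
	if (on == example_tstamp_enabled)
		return;
	example_tstamp_enabled = on;
	if (on)
		net_enable_timestamp();
	else
		net_disable_timestamp();
}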
1647
1648 static inline void net_timestamp_set(struct sk_buff *skb)
1649 {
1650         skb->tstamp.tv64 = 0;
1651         if (static_key_false(&netstamp_needed))
1652                 __net_timestamp(skb);
1653 }
1654
1655 #define net_timestamp_check(COND, SKB)                  \
1656         if (static_key_false(&netstamp_needed)) {               \
1657                 if ((COND) && !(SKB)->tstamp.tv64)      \
1658                         __net_timestamp(SKB);           \
1659         }                                               \
1660
1661 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1662 {
1663         unsigned int len;
1664
1665         if (!(dev->flags & IFF_UP))
1666                 return false;
1667
1668         len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1669         if (skb->len <= len)
1670                 return true;
1671
1672         /* if TSO is enabled, we don't care about the length as the packet
1673          * could be forwarded without being segmented beforehand
1674          */
1675         if (skb_is_gso(skb))
1676                 return true;
1677
1678         return false;
1679 }
1680 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1681
1682 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1683 {
1684         if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1685                 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1686                         atomic_long_inc(&dev->rx_dropped);
1687                         kfree_skb(skb);
1688                         return NET_RX_DROP;
1689                 }
1690         }
1691
1692         if (unlikely(!is_skb_forwardable(dev, skb))) {
1693                 atomic_long_inc(&dev->rx_dropped);
1694                 kfree_skb(skb);
1695                 return NET_RX_DROP;
1696         }
1697
1698         skb_scrub_packet(skb, true);
1699         skb->protocol = eth_type_trans(skb, dev);
1700
1701         return 0;
1702 }
1703 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1704
1705 /**
1706  * dev_forward_skb - loopback an skb to another netif
1707  *
1708  * @dev: destination network device
1709  * @skb: buffer to forward
1710  *
1711  * return values:
1712  *      NET_RX_SUCCESS  (no congestion)
1713  *      NET_RX_DROP     (packet was dropped, but freed)
1714  *
1715  * dev_forward_skb can be used for injecting an skb from the
1716  * start_xmit function of one device into the receive queue
1717  * of another device.
1718  *
1719  * The receiving device may be in another namespace, so
1720  * we have to clear all information in the skb that could
1721  * impact namespace isolation.
1722  */
1723 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1724 {
1725         return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1726 }
1727 EXPORT_SYMBOL_GPL(dev_forward_skb);
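
/* Example (editor's sketch): a veth-like ndo_start_xmit that injects the
 * frame into a peer device's receive path.  struct example_priv and the
 * function names are hypothetical; dev_forward_skb() frees the skb itself
 * when it has to drop.
 */
struct example_priv {
	struct net_device __rcu *peer;
};

static netdev_tx_t example_peer_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int rc = NET_RX_DROP;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (likely(peer))
		rc = dev_forward_skb(peer, skb);
	else
		kfree_skb(skb);
	rcu_read_unlock();

	if (rc != NET_RX_SUCCESS)
		dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}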
1728
1729 static inline int deliver_skb(struct sk_buff *skb,
1730                               struct packet_type *pt_prev,
1731                               struct net_device *orig_dev)
1732 {
1733         if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1734                 return -ENOMEM;
1735         atomic_inc(&skb->users);
1736         return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1737 }
1738
1739 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1740 {
1741         if (!ptype->af_packet_priv || !skb->sk)
1742                 return false;
1743
1744         if (ptype->id_match)
1745                 return ptype->id_match(ptype, skb->sk);
1746         else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1747                 return true;
1748
1749         return false;
1750 }
1751
1752 /*
1753  *      Support routine. Sends outgoing frames to any network
1754  *      taps currently in use.
1755  */
1756
1757 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1758 {
1759         struct packet_type *ptype;
1760         struct sk_buff *skb2 = NULL;
1761         struct packet_type *pt_prev = NULL;
1762
1763         rcu_read_lock();
1764         list_for_each_entry_rcu(ptype, &ptype_all, list) {
1765                 /* Never send packets back to the socket
1766                  * they originated from - MvS (miquels@drinkel.ow.org)
1767                  */
1768                 if ((ptype->dev == dev || !ptype->dev) &&
1769                     (!skb_loop_sk(ptype, skb))) {
1770                         if (pt_prev) {
1771                                 deliver_skb(skb2, pt_prev, skb->dev);
1772                                 pt_prev = ptype;
1773                                 continue;
1774                         }
1775
1776                         skb2 = skb_clone(skb, GFP_ATOMIC);
1777                         if (!skb2)
1778                                 break;
1779
1780                         net_timestamp_set(skb2);
1781
1782                         /* skb->nh should be correctly set by the sender,
1783                          * so that the second statement is just protection
1784                          * against buggy protocols.
1785                          */
1786                         skb_reset_mac_header(skb2);
1787
1788                         if (skb_network_header(skb2) < skb2->data ||
1789                             skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1790                                 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1791                                                      ntohs(skb2->protocol),
1792                                                      dev->name);
1793                                 skb_reset_network_header(skb2);
1794                         }
1795
1796                         skb2->transport_header = skb2->network_header;
1797                         skb2->pkt_type = PACKET_OUTGOING;
1798                         pt_prev = ptype;
1799                 }
1800         }
1801         if (pt_prev)
1802                 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1803         rcu_read_unlock();
1804 }
1805
1806 /**
1807  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1808  * @dev: Network device
1809  * @txq: number of queues available
1810  *
1811  * If real_num_tx_queues is changed the tc mappings may no longer be
1812  * valid. To resolve this verify the tc mapping remains valid and if
1813  * not, NULL the mapping. With no priorities mapping to this
1814  * offset/count pair it will no longer be used. In the worst case, if TC0
1815  * is invalid, nothing can be done, so priority mappings are disabled. It is
1816  * expected that drivers will fix this mapping if they can before
1817  * calling netif_set_real_num_tx_queues.
1818  */
1819 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1820 {
1821         int i;
1822         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1823
1824         /* If TC0 is invalidated disable TC mapping */
1825         if (tc->offset + tc->count > txq) {
1826                 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1827                 dev->num_tc = 0;
1828                 return;
1829         }
1830
1831         /* Invalidated prio to tc mappings set to TC0 */
1832         for (i = 1; i < TC_BITMASK + 1; i++) {
1833                 int q = netdev_get_prio_tc_map(dev, i);
1834
1835                 tc = &dev->tc_to_txq[q];
1836                 if (tc->offset + tc->count > txq) {
1837                         pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1838                                 i, q);
1839                         netdev_set_prio_tc_map(dev, i, 0);
1840                 }
1841         }
1842 }
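
/* Example (editor's sketch): how a driver would set up the tc mappings that
 * netif_setup_tc() later validates.  Two traffic classes over eight TX queues
 * are assumed; example_setup_tc is a hypothetical helper.
 */
static int example_setup_tc(struct net_device *dev)
{
	int prio, err;

	err = netdev_set_num_tc(dev, 2);
	if (err)
		return err;

	/* TC0 -> queues 0..3, TC1 -> queues 4..7 */
	netdev_set_tc_queue(dev, 0, 4, 0);
	netdev_set_tc_queue(dev, 1, 4, 4);

	/* priorities 0..3 map to TC0, the rest to TC1 */
	for (prio = 0; prio <= TC_BITMASK; prio++)
		netdev_set_prio_tc_map(dev, prio, prio < 4 ? 0 : 1);

	return 0;
}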
1843
1844 #ifdef CONFIG_XPS
1845 static DEFINE_MUTEX(xps_map_mutex);
1846 #define xmap_dereference(P)             \
1847         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1848
1849 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1850                                         int cpu, u16 index)
1851 {
1852         struct xps_map *map = NULL;
1853         int pos;
1854
1855         if (dev_maps)
1856                 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1857
1858         for (pos = 0; map && pos < map->len; pos++) {
1859                 if (map->queues[pos] == index) {
1860                         if (map->len > 1) {
1861                                 map->queues[pos] = map->queues[--map->len];
1862                         } else {
1863                                 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1864                                 kfree_rcu(map, rcu);
1865                                 map = NULL;
1866                         }
1867                         break;
1868                 }
1869         }
1870
1871         return map;
1872 }
1873
1874 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1875 {
1876         struct xps_dev_maps *dev_maps;
1877         int cpu, i;
1878         bool active = false;
1879
1880         mutex_lock(&xps_map_mutex);
1881         dev_maps = xmap_dereference(dev->xps_maps);
1882
1883         if (!dev_maps)
1884                 goto out_no_maps;
1885
1886         for_each_possible_cpu(cpu) {
1887                 for (i = index; i < dev->num_tx_queues; i++) {
1888                         if (!remove_xps_queue(dev_maps, cpu, i))
1889                                 break;
1890                 }
1891                 if (i == dev->num_tx_queues)
1892                         active = true;
1893         }
1894
1895         if (!active) {
1896                 RCU_INIT_POINTER(dev->xps_maps, NULL);
1897                 kfree_rcu(dev_maps, rcu);
1898         }
1899
1900         for (i = index; i < dev->num_tx_queues; i++)
1901                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1902                                              NUMA_NO_NODE);
1903
1904 out_no_maps:
1905         mutex_unlock(&xps_map_mutex);
1906 }
1907
1908 static struct xps_map *expand_xps_map(struct xps_map *map,
1909                                       int cpu, u16 index)
1910 {
1911         struct xps_map *new_map;
1912         int alloc_len = XPS_MIN_MAP_ALLOC;
1913         int i, pos;
1914
1915         for (pos = 0; map && pos < map->len; pos++) {
1916                 if (map->queues[pos] != index)
1917                         continue;
1918                 return map;
1919         }
1920
1921         /* Need to add queue to this CPU's existing map */
1922         if (map) {
1923                 if (pos < map->alloc_len)
1924                         return map;
1925
1926                 alloc_len = map->alloc_len * 2;
1927         }
1928
1929         /* Need to allocate a new map to store the queue on this CPU */
1930         new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1931                                cpu_to_node(cpu));
1932         if (!new_map)
1933                 return NULL;
1934
1935         for (i = 0; i < pos; i++)
1936                 new_map->queues[i] = map->queues[i];
1937         new_map->alloc_len = alloc_len;
1938         new_map->len = pos;
1939
1940         return new_map;
1941 }
1942
1943 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1944                         u16 index)
1945 {
1946         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
1947         struct xps_map *map, *new_map;
1948         int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
1949         int cpu, numa_node_id = -2;
1950         bool active = false;
1951
1952         mutex_lock(&xps_map_mutex);
1953
1954         dev_maps = xmap_dereference(dev->xps_maps);
1955
1956         /* allocate memory for queue storage */
1957         for_each_online_cpu(cpu) {
1958                 if (!cpumask_test_cpu(cpu, mask))
1959                         continue;
1960
1961                 if (!new_dev_maps)
1962                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
1963                 if (!new_dev_maps) {
1964                         mutex_unlock(&xps_map_mutex);
1965                         return -ENOMEM;
1966                 }
1967
1968                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1969                                  NULL;
1970
1971                 map = expand_xps_map(map, cpu, index);
1972                 if (!map)
1973                         goto error;
1974
1975                 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1976         }
1977
1978         if (!new_dev_maps)
1979                 goto out_no_new_maps;
1980
1981         for_each_possible_cpu(cpu) {
1982                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1983                         /* add queue to CPU maps */
1984                         int pos = 0;
1985
1986                         map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1987                         while ((pos < map->len) && (map->queues[pos] != index))
1988                                 pos++;
1989
1990                         if (pos == map->len)
1991                                 map->queues[map->len++] = index;
1992 #ifdef CONFIG_NUMA
1993                         if (numa_node_id == -2)
1994                                 numa_node_id = cpu_to_node(cpu);
1995                         else if (numa_node_id != cpu_to_node(cpu))
1996                                 numa_node_id = -1;
1997 #endif
1998                 } else if (dev_maps) {
1999                         /* fill in the new device map from the old device map */
2000                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
2001                         RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2002                 }
2003
2004         }
2005
2006         rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2007
2008         /* Cleanup old maps */
2009         if (dev_maps) {
2010                 for_each_possible_cpu(cpu) {
2011                         new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2012                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
2013                         if (map && map != new_map)
2014                                 kfree_rcu(map, rcu);
2015                 }
2016
2017                 kfree_rcu(dev_maps, rcu);
2018         }
2019
2020         dev_maps = new_dev_maps;
2021         active = true;
2022
2023 out_no_new_maps:
2024         /* update Tx queue numa node */
2025         netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2026                                      (numa_node_id >= 0) ? numa_node_id :
2027                                      NUMA_NO_NODE);
2028
2029         if (!dev_maps)
2030                 goto out_no_maps;
2031
2032         /* removes queue from unused CPUs */
2033         for_each_possible_cpu(cpu) {
2034                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2035                         continue;
2036
2037                 if (remove_xps_queue(dev_maps, cpu, index))
2038                         active = true;
2039         }
2040
2041         /* free map if not active */
2042         if (!active) {
2043                 RCU_INIT_POINTER(dev->xps_maps, NULL);
2044                 kfree_rcu(dev_maps, rcu);
2045         }
2046
2047 out_no_maps:
2048         mutex_unlock(&xps_map_mutex);
2049
2050         return 0;
2051 error:
2052         /* remove any maps that we added */
2053         for_each_possible_cpu(cpu) {
2054                 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2055                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2056                                  NULL;
2057                 if (new_map && new_map != map)
2058                         kfree(new_map);
2059         }
2060
2061         mutex_unlock(&xps_map_mutex);
2062
2063         kfree(new_dev_maps);
2064         return -ENOMEM;
2065 }
2066 EXPORT_SYMBOL(netif_set_xps_queue);
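
/* Example (editor's sketch): a common XPS policy that pins each TX queue to
 * one CPU, wrapping around when there are more queues than online CPUs.
 * example_setup_xps is a hypothetical helper; errors are ignored here for
 * brevity.
 */
static void example_setup_xps(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->real_num_tx_queues; i++)
		netif_set_xps_queue(dev, cpumask_of(i % num_online_cpus()),
				    i);
}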
2067
2068 #endif
2069 /*
2070  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2071  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2072  */
2073 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2074 {
2075         int rc;
2076
2077         if (txq < 1 || txq > dev->num_tx_queues)
2078                 return -EINVAL;
2079
2080         if (dev->reg_state == NETREG_REGISTERED ||
2081             dev->reg_state == NETREG_UNREGISTERING) {
2082                 ASSERT_RTNL();
2083
2084                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2085                                                   txq);
2086                 if (rc)
2087                         return rc;
2088
2089                 if (dev->num_tc)
2090                         netif_setup_tc(dev, txq);
2091
2092                 if (txq < dev->real_num_tx_queues) {
2093                         qdisc_reset_all_tx_gt(dev, txq);
2094 #ifdef CONFIG_XPS
2095                         netif_reset_xps_queues_gt(dev, txq);
2096 #endif
2097                 }
2098         }
2099
2100         dev->real_num_tx_queues = txq;
2101         return 0;
2102 }
2103 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2104
2105 #ifdef CONFIG_SYSFS
2106 /**
2107  *      netif_set_real_num_rx_queues - set actual number of RX queues used
2108  *      @dev: Network device
2109  *      @rxq: Actual number of RX queues
2110  *
2111  *      This must be called either with the rtnl_lock held or before
2112  *      registration of the net device.  Returns 0 on success, or a
2113  *      negative error code.  If called before registration, it always
2114  *      succeeds.
2115  */
2116 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2117 {
2118         int rc;
2119
2120         if (rxq < 1 || rxq > dev->num_rx_queues)
2121                 return -EINVAL;
2122
2123         if (dev->reg_state == NETREG_REGISTERED) {
2124                 ASSERT_RTNL();
2125
2126                 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2127                                                   rxq);
2128                 if (rc)
2129                         return rc;
2130         }
2131
2132         dev->real_num_rx_queues = rxq;
2133         return 0;
2134 }
2135 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2136 #endif
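
/* Example (editor's sketch): resizing the active queue set after probing how
 * many hardware channels are usable.  example_set_channels is hypothetical;
 * both calls require the rtnl lock once the device is registered.
 */
static int example_set_channels(struct net_device *dev, unsigned int n)
{
	int err;

	rtnl_lock();
	err = netif_set_real_num_tx_queues(dev, n);
	if (!err)
		err = netif_set_real_num_rx_queues(dev, n);
	rtnl_unlock();
	return err;
}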
2137
2138 /**
2139  * netif_get_num_default_rss_queues - default number of RSS queues
2140  *
2141  * This routine should set an upper limit on the number of RSS queues
2142  * used by default by multiqueue devices.
2143  */
2144 int netif_get_num_default_rss_queues(void)
2145 {
2146         return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2147 }
2148 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
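
/* Example (editor's sketch): clamping a device's RSS queue count to the
 * default upper bound at probe time.  hw_max is a hypothetical hardware
 * limit supplied by the caller.
 */
static unsigned int example_pick_rss_queues(unsigned int hw_max)
{
	return min_t(unsigned int, hw_max,
		     netif_get_num_default_rss_queues());
}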
2149
2150 static inline void __netif_reschedule(struct Qdisc *q)
2151 {
2152         struct softnet_data *sd;
2153         unsigned long flags;
2154
2155         local_irq_save(flags);
2156         sd = this_cpu_ptr(&softnet_data);
2157         q->next_sched = NULL;
2158         *sd->output_queue_tailp = q;
2159         sd->output_queue_tailp = &q->next_sched;
2160         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2161         local_irq_restore(flags);
2162 }
2163
2164 void __netif_schedule(struct Qdisc *q)
2165 {
2166         if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2167                 __netif_reschedule(q);
2168 }
2169 EXPORT_SYMBOL(__netif_schedule);
2170
2171 struct dev_kfree_skb_cb {
2172         enum skb_free_reason reason;
2173 };
2174
2175 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2176 {
2177         return (struct dev_kfree_skb_cb *)skb->cb;
2178 }
2179
2180 void netif_schedule_queue(struct netdev_queue *txq)
2181 {
2182         rcu_read_lock();
2183         if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2184                 struct Qdisc *q = rcu_dereference(txq->qdisc);
2185
2186                 __netif_schedule(q);
2187         }
2188         rcu_read_unlock();
2189 }
2190 EXPORT_SYMBOL(netif_schedule_queue);
2191
2192 /**
2193  *      netif_wake_subqueue - allow sending packets on subqueue
2194  *      @dev: network device
2195  *      @queue_index: sub queue index
2196  *
2197  * Resume individual transmit queue of a device with multiple transmit queues.
2198  */
2199 void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2200 {
2201         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2202
2203         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2204                 struct Qdisc *q;
2205
2206                 rcu_read_lock();
2207                 q = rcu_dereference(txq->qdisc);
2208                 __netif_schedule(q);
2209                 rcu_read_unlock();
2210         }
2211 }
2212 EXPORT_SYMBOL(netif_wake_subqueue);
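
/* Example (editor's sketch): a per-queue TX completion handler that restarts
 * a queue it previously stopped once the ring has room for a worst-case
 * frame again.  example_tx_clean and free_descs are hypothetical.
 */
static void example_tx_clean(struct net_device *dev, u16 queue_index,
			     unsigned int free_descs)
{
	if (__netif_subqueue_stopped(dev, queue_index) &&
	    free_descs > MAX_SKB_FRAGS + 1)
		netif_wake_subqueue(dev, queue_index);
}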
2213
2214 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2215 {
2216         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2217                 struct Qdisc *q;
2218
2219                 rcu_read_lock();
2220                 q = rcu_dereference(dev_queue->qdisc);
2221                 __netif_schedule(q);
2222                 rcu_read_unlock();
2223         }
2224 }
2225 EXPORT_SYMBOL(netif_tx_wake_queue);
2226
2227 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2228 {
2229         unsigned long flags;
2230
2231         if (likely(atomic_read(&skb->users) == 1)) {
2232                 smp_rmb();
2233                 atomic_set(&skb->users, 0);
2234         } else if (likely(!atomic_dec_and_test(&skb->users))) {
2235                 return;
2236         }
2237         get_kfree_skb_cb(skb)->reason = reason;
2238         local_irq_save(flags);
2239         skb->next = __this_cpu_read(softnet_data.completion_queue);
2240         __this_cpu_write(softnet_data.completion_queue, skb);
2241         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2242         local_irq_restore(flags);
2243 }
2244 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2245
2246 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2247 {
2248         if (in_irq() || irqs_disabled())
2249                 __dev_kfree_skb_irq(skb, reason);
2250         else
2251                 dev_kfree_skb(skb);
2252 }
2253 EXPORT_SYMBOL(__dev_kfree_skb_any);
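
/* Example (editor's sketch): freeing skbs from a completion handler that may
 * run in hard-IRQ or in process context.  example_tx_complete is hypothetical;
 * the consume/kfree distinction only affects drop accounting and tracing.
 */
static void example_tx_complete(struct sk_buff *skb, bool transmitted)
{
	if (transmitted)
		dev_consume_skb_any(skb);
	else
		dev_kfree_skb_any(skb);
}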
2254
2255
2256 /**
2257  * netif_device_detach - mark device as removed
2258  * @dev: network device
2259  *
2260  * Mark device as removed from the system and therefore no longer available.
2261  */
2262 void netif_device_detach(struct net_device *dev)
2263 {
2264         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2265             netif_running(dev)) {
2266                 netif_tx_stop_all_queues(dev);
2267         }
2268 }
2269 EXPORT_SYMBOL(netif_device_detach);
2270
2271 /**
2272  * netif_device_attach - mark device as attached
2273  * @dev: network device
2274  *
2275  * Mark device as attached to the system and restart if needed.
2276  */
2277 void netif_device_attach(struct net_device *dev)
2278 {
2279         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2280             netif_running(dev)) {
2281                 netif_tx_wake_all_queues(dev);
2282                 __netdev_watchdog_up(dev);
2283         }
2284 }
2285 EXPORT_SYMBOL(netif_device_attach);
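
/* Example (editor's sketch): the usual suspend/resume pairing around
 * netif_device_detach()/netif_device_attach().  The PM callbacks and the
 * dev_get_drvdata() usage are hypothetical for this sketch.
 */
static int example_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_detach(dev);
	return 0;
}

static int example_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);

	netif_device_attach(dev);
	return 0;
}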
2286
2287 static void skb_warn_bad_offload(const struct sk_buff *skb)
2288 {
2289         static const netdev_features_t null_features = 0;
2290         struct net_device *dev = skb->dev;
2291         const char *driver = "";
2292
2293         if (!net_ratelimit())
2294                 return;
2295
2296         if (dev && dev->dev.parent)
2297                 driver = dev_driver_string(dev->dev.parent);
2298
2299         WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2300              "gso_type=%d ip_summed=%d\n",
2301              driver, dev ? &dev->features : &null_features,
2302              skb->sk ? &skb->sk->sk_route_caps : &null_features,
2303              skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2304              skb_shinfo(skb)->gso_type, skb->ip_summed);
2305 }
2306
2307 /*
2308  * Invalidate hardware checksum when packet is to be mangled, and
2309  * complete checksum manually on outgoing path.
2310  */
2311 int skb_checksum_help(struct sk_buff *skb)
2312 {
2313         __wsum csum;
2314         int ret = 0, offset;
2315
2316         if (skb->ip_summed == CHECKSUM_COMPLETE)
2317                 goto out_set_summed;
2318
2319         if (unlikely(skb_shinfo(skb)->gso_size)) {
2320                 skb_warn_bad_offload(skb);
2321                 return -EINVAL;
2322         }
2323
2324         /* Before computing a checksum, we should make sure no frag could
2325          * be modified by an external entity: the checksum could be wrong.
2326          */
2327         if (skb_has_shared_frag(skb)) {
2328                 ret = __skb_linearize(skb);
2329                 if (ret)
2330                         goto out;
2331         }
2332
2333         offset = skb_checksum_start_offset(skb);
2334         BUG_ON(offset >= skb_headlen(skb));
2335         csum = skb_checksum(skb, offset, skb->len - offset, 0);
2336
2337         offset += skb->csum_offset;
2338         BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2339
2340         if (skb_cloned(skb) &&
2341             !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2342                 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2343                 if (ret)
2344                         goto out;
2345         }
2346
2347         *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2348 out_set_summed:
2349         skb->ip_summed = CHECKSUM_NONE;
2350 out:
2351         return ret;
2352 }
2353 EXPORT_SYMBOL(skb_checksum_help);
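
/* Example (editor's sketch): the typical driver fallback when the hardware
 * cannot checksum a particular frame.  example_xmit_csum and the "queue it
 * to the ring" step are hypothetical.
 */
static netdev_tx_t example_xmit_csum(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* ... the frame is now fully checksummed; queue it to the ring ... */
	return NETDEV_TX_OK;
}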
2354
2355 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2356 {
2357         unsigned int vlan_depth = skb->mac_len;
2358         __be16 type = skb->protocol;
2359
2360         /* Tunnel gso handlers can set protocol to ethernet. */
2361         if (type == htons(ETH_P_TEB)) {
2362                 struct ethhdr *eth;
2363
2364                 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2365                         return 0;
2366
2367                 eth = (struct ethhdr *)skb_mac_header(skb);
2368                 type = eth->h_proto;
2369         }
2370
2371         /* if skb->protocol is 802.1Q/AD then the header should already be
2372          * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
2373          * ETH_HLEN otherwise
2374          */
2375         if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2376                 if (vlan_depth) {
2377                         if (WARN_ON(vlan_depth < VLAN_HLEN))
2378                                 return 0;
2379                         vlan_depth -= VLAN_HLEN;
2380                 } else {
2381                         vlan_depth = ETH_HLEN;
2382                 }
2383                 do {
2384                         struct vlan_hdr *vh;
2385
2386                         if (unlikely(!pskb_may_pull(skb,
2387                                                     vlan_depth + VLAN_HLEN)))
2388                                 return 0;
2389
2390                         vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2391                         type = vh->h_vlan_encapsulated_proto;
2392                         vlan_depth += VLAN_HLEN;
2393                 } while (type == htons(ETH_P_8021Q) ||
2394                          type == htons(ETH_P_8021AD));
2395         }
2396
2397         *depth = vlan_depth;
2398
2399         return type;
2400 }
2401
2402 /**
2403  *      skb_mac_gso_segment - mac layer segmentation handler.
2404  *      @skb: buffer to segment
2405  *      @features: features for the output path (see dev->features)
2406  */
2407 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2408                                     netdev_features_t features)
2409 {
2410         struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2411         struct packet_offload *ptype;
2412         int vlan_depth = skb->mac_len;
2413         __be16 type = skb_network_protocol(skb, &vlan_depth);
2414
2415         if (unlikely(!type))
2416                 return ERR_PTR(-EINVAL);
2417
2418         __skb_pull(skb, vlan_depth);
2419
2420         rcu_read_lock();
2421         list_for_each_entry_rcu(ptype, &offload_base, list) {
2422                 if (ptype->type == type && ptype->callbacks.gso_segment) {
2423                         segs = ptype->callbacks.gso_segment(skb, features);
2424                         break;
2425                 }
2426         }
2427         rcu_read_unlock();
2428
2429         __skb_push(skb, skb->data - skb_mac_header(skb));
2430
2431         return segs;
2432 }
2433 EXPORT_SYMBOL(skb_mac_gso_segment);
2434
2435
2436 /* openvswitch calls this on rx path, so we need a different check.
2437  */
2438 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2439 {
2440         if (tx_path)
2441                 return skb->ip_summed != CHECKSUM_PARTIAL;
2442         else
2443                 return skb->ip_summed == CHECKSUM_NONE;
2444 }
2445
2446 /**
2447  *      __skb_gso_segment - Perform segmentation on skb.
2448  *      @skb: buffer to segment
2449  *      @features: features for the output path (see dev->features)
2450  *      @tx_path: whether it is called in TX path
2451  *
2452  *      This function segments the given skb and returns a list of segments.
2453  *
2454  *      It may return NULL if the skb requires no segmentation.  This is
2455  *      only possible when GSO is used for verifying header integrity.
2456  */
2457 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2458                                   netdev_features_t features, bool tx_path)
2459 {
2460         if (unlikely(skb_needs_check(skb, tx_path))) {
2461                 int err;
2462
2463                 skb_warn_bad_offload(skb);
2464
2465                 err = skb_cow_head(skb, 0);
2466                 if (err < 0)
2467                         return ERR_PTR(err);
2468         }
2469
2470         SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2471         SKB_GSO_CB(skb)->encap_level = 0;
2472
2473         skb_reset_mac_header(skb);
2474         skb_reset_mac_len(skb);
2475
2476         return skb_mac_gso_segment(skb, features);
2477 }
2478 EXPORT_SYMBOL(__skb_gso_segment);
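
/* Example (editor's sketch): segmenting in software when the hardware cannot
 * handle a GSO frame.  example_xmit_gso and example_queue_to_ring are
 * hypothetical; skb_gso_segment() is the TX-path wrapper around
 * __skb_gso_segment().
 */
static void example_queue_to_ring(struct sk_buff *skb, struct net_device *dev)
{
	/* placeholder for the driver's real enqueue step */
	dev_kfree_skb_any(skb);
}

static netdev_tx_t example_xmit_gso(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *segs;

	if (!skb_is_gso(skb)) {
		example_queue_to_ring(skb, dev);
		return NETDEV_TX_OK;
	}

	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_GSO_MASK);
	if (IS_ERR(segs)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
	if (!segs) {
		/* no segmentation was needed after all */
		example_queue_to_ring(skb, dev);
		return NETDEV_TX_OK;
	}

	consume_skb(skb);
	while (segs) {
		struct sk_buff *next = segs->next;

		segs->next = NULL;
		example_queue_to_ring(segs, dev);
		segs = next;
	}
	return NETDEV_TX_OK;
}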
2479
2480 /* Take action when hardware reception checksum errors are detected. */
2481 #ifdef CONFIG_BUG
2482 void netdev_rx_csum_fault(struct net_device *dev)
2483 {
2484         if (net_ratelimit()) {
2485                 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2486                 dump_stack();
2487         }
2488 }
2489 EXPORT_SYMBOL(netdev_rx_csum_fault);
2490 #endif
2491
2492 /* Actually, we should eliminate this check as soon as we know that:
2493  * 1. An IOMMU is present and allows all the memory to be mapped.
2494  * 2. No high memory really exists on this machine.
2495  */
2496
2497 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2498 {
2499 #ifdef CONFIG_HIGHMEM
2500         int i;
2501         if (!(dev->features & NETIF_F_HIGHDMA)) {
2502                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2503                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2504                         if (PageHighMem(skb_frag_page(frag)))
2505                                 return 1;
2506                 }
2507         }
2508
2509         if (PCI_DMA_BUS_IS_PHYS) {
2510                 struct device *pdev = dev->dev.parent;
2511
2512                 if (!pdev)
2513                         return 0;
2514                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2515                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2516                         dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2517                         if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2518                                 return 1;
2519                 }
2520         }
2521 #endif
2522         return 0;
2523 }
2524
2525 /* If MPLS offload request, verify we are testing hardware MPLS features
2526  * instead of standard features for the netdev.
2527  */
2528 #ifdef CONFIG_NET_MPLS_GSO
2529 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2530                                            netdev_features_t features,
2531                                            __be16 type)
2532 {
2533         if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
2534                 features &= skb->dev->mpls_features;
2535
2536         return features;
2537 }
2538 #else
2539 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2540                                            netdev_features_t features,
2541                                            __be16 type)
2542 {
2543         return features;
2544 }
2545 #endif
2546
2547 static netdev_features_t harmonize_features(struct sk_buff *skb,
2548         netdev_features_t features)
2549 {
2550         int tmp;
2551         __be16 type;
2552
2553         type = skb_network_protocol(skb, &tmp);
2554         features = net_mpls_features(skb, features, type);
2555
2556         if (skb->ip_summed != CHECKSUM_NONE &&
2557             !can_checksum_protocol(features, type)) {
2558                 features &= ~NETIF_F_ALL_CSUM;
2559         } else if (illegal_highdma(skb->dev, skb)) {
2560                 features &= ~NETIF_F_SG;
2561         }
2562
2563         return features;
2564 }
2565
2566 netdev_features_t netif_skb_features(struct sk_buff *skb)
2567 {
2568         const struct net_device *dev = skb->dev;
2569         netdev_features_t features = dev->features;
2570         u16 gso_segs = skb_shinfo(skb)->gso_segs;
2571         __be16 protocol = skb->protocol;
2572
2573         if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2574                 features &= ~NETIF_F_GSO_MASK;
2575
2576         if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
2577                 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2578                 protocol = veh->h_vlan_encapsulated_proto;
2579         } else if (!vlan_tx_tag_present(skb)) {
2580                 return harmonize_features(skb, features);
2581         }
2582
2583         features = netdev_intersect_features(features,
2584                                              dev->vlan_features |
2585                                              NETIF_F_HW_VLAN_CTAG_TX |
2586                                              NETIF_F_HW_VLAN_STAG_TX);
2587
2588         if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
2589                 features = netdev_intersect_features(features,
2590                                                      NETIF_F_SG |
2591                                                      NETIF_F_HIGHDMA |
2592                                                      NETIF_F_FRAGLIST |
2593                                                      NETIF_F_GEN_CSUM |
2594                                                      NETIF_F_HW_VLAN_CTAG_TX |
2595                                                      NETIF_F_HW_VLAN_STAG_TX);
2596
2597         return harmonize_features(skb, features);
2598 }
2599 EXPORT_SYMBOL(netif_skb_features);
2600
2601 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2602                     struct netdev_queue *txq, bool more)
2603 {
2604         unsigned int len;
2605         int rc;
2606
2607         if (!list_empty(&ptype_all))
2608                 dev_queue_xmit_nit(skb, dev);
2609
2610         len = skb->len;
2611         trace_net_dev_start_xmit(skb, dev);
2612         rc = netdev_start_xmit(skb, dev, txq, more);
2613         trace_net_dev_xmit(skb, rc, dev, len);
2614
2615         return rc;
2616 }
2617
2618 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2619                                     struct netdev_queue *txq, int *ret)
2620 {
2621         struct sk_buff *skb = first;
2622         int rc = NETDEV_TX_OK;
2623
2624         while (skb) {
2625                 struct sk_buff *next = skb->next;
2626
2627                 skb->next = NULL;
2628                 rc = xmit_one(skb, dev, txq, next != NULL);
2629                 if (unlikely(!dev_xmit_complete(rc))) {
2630                         skb->next = next;
2631                         goto out;
2632                 }
2633
2634                 skb = next;
2635                 if (netif_xmit_stopped(txq) && skb) {
2636                         rc = NETDEV_TX_BUSY;
2637                         break;
2638                 }
2639         }
2640
2641 out:
2642         *ret = rc;
2643         return skb;
2644 }
2645
2646 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2647                                           netdev_features_t features)
2648 {
2649         if (vlan_tx_tag_present(skb) &&
2650             !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2651                 skb = __vlan_put_tag(skb, skb->vlan_proto,
2652                                      vlan_tx_tag_get(skb));
2653                 if (skb)
2654                         skb->vlan_tci = 0;
2655         }
2656         return skb;
2657 }
2658
2659 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2660 {
2661         netdev_features_t features;
2662
2663         if (skb->next)
2664                 return skb;
2665
2666         features = netif_skb_features(skb);
2667         skb = validate_xmit_vlan(skb, features);
2668         if (unlikely(!skb))
2669                 goto out_null;
2670
2671         /* If encapsulation offload request, verify we are testing
2672          * hardware encapsulation features instead of standard
2673          * features for the netdev
2674          */
2675         if (skb->encapsulation)
2676                 features &= dev->hw_enc_features;
2677
2678         if (netif_needs_gso(dev, skb, features)) {
2679                 struct sk_buff *segs;
2680
2681                 segs = skb_gso_segment(skb, features);
2682                 if (IS_ERR(segs)) {
2683                         segs = NULL;
2684                 } else if (segs) {
2685                         consume_skb(skb);
2686                         skb = segs;
2687                 }
2688         } else {
2689                 if (skb_needs_linearize(skb, features) &&
2690                     __skb_linearize(skb))
2691                         goto out_kfree_skb;
2692
2693                 /* If packet is not checksummed and device does not
2694                  * support checksumming for this protocol, complete
2695                  * checksumming here.
2696                  */
2697                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2698                         if (skb->encapsulation)
2699                                 skb_set_inner_transport_header(skb,
2700                                                                skb_checksum_start_offset(skb));
2701                         else
2702                                 skb_set_transport_header(skb,
2703                                                          skb_checksum_start_offset(skb));
2704                         if (!(features & NETIF_F_ALL_CSUM) &&
2705                             skb_checksum_help(skb))
2706                                 goto out_kfree_skb;
2707                 }
2708         }
2709
2710         return skb;
2711
2712 out_kfree_skb:
2713         kfree_skb(skb);
2714 out_null:
2715         return NULL;
2716 }
2717
2718 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2719 {
2720         struct sk_buff *next, *head = NULL, *tail;
2721
2722         for (; skb != NULL; skb = next) {
2723                 next = skb->next;
2724                 skb->next = NULL;
2725
2726                 /* in case skb won't be segmented, point to itself */
2727                 skb->prev = skb;
2728
2729                 skb = validate_xmit_skb(skb, dev);
2730                 if (!skb)
2731                         continue;
2732
2733                 if (!head)
2734                         head = skb;
2735                 else
2736                         tail->next = skb;
2737                 /* If skb was segmented, skb->prev points to
2738                  * the last segment. If not, it still contains skb.
2739                  */
2740                 tail = skb->prev;
2741         }
2742         return head;
2743 }
2744
2745 static void qdisc_pkt_len_init(struct sk_buff *skb)
2746 {
2747         const struct skb_shared_info *shinfo = skb_shinfo(skb);
2748
2749         qdisc_skb_cb(skb)->pkt_len = skb->len;
2750
2751         /* To get a more precise estimate of the bytes sent on the wire,
2752          * we add the header size of all segments to pkt_len
2753          */
2754         if (shinfo->gso_size)  {
2755                 unsigned int hdr_len;
2756                 u16 gso_segs = shinfo->gso_segs;
2757
2758                 /* mac layer + network layer */
2759                 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2760
2761                 /* + transport layer */
2762                 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2763                         hdr_len += tcp_hdrlen(skb);
2764                 else
2765                         hdr_len += sizeof(struct udphdr);
2766
2767                 if (shinfo->gso_type & SKB_GSO_DODGY)
2768                         gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2769                                                 shinfo->gso_size);
2770
2771                 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
2772         }
2773 }
2774
2775 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2776                                  struct net_device *dev,
2777                                  struct netdev_queue *txq)
2778 {
2779         spinlock_t *root_lock = qdisc_lock(q);
2780         bool contended;
2781         int rc;
2782
2783         qdisc_pkt_len_init(skb);
2784         qdisc_calculate_pkt_len(skb, q);
2785         /*
2786          * Heuristic to force contended enqueues to serialize on a
2787          * separate lock before trying to get qdisc main lock.
2788          * This permits __QDISC___STATE_RUNNING owner to get the lock more
2789          * often and dequeue packets faster.
2790          */
2791         contended = qdisc_is_running(q);
2792         if (unlikely(contended))
2793                 spin_lock(&q->busylock);
2794
2795         spin_lock(root_lock);
2796         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2797                 kfree_skb(skb);
2798                 rc = NET_XMIT_DROP;
2799         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2800                    qdisc_run_begin(q)) {
2801                 /*
2802                  * This is a work-conserving queue; there are no old skbs
2803                  * waiting to be sent out; and the qdisc is not running -
2804                  * xmit the skb directly.
2805                  */
2806
2807                 qdisc_bstats_update(q, skb);
2808
2809                 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
2810                         if (unlikely(contended)) {
2811                                 spin_unlock(&q->busylock);
2812                                 contended = false;
2813                         }
2814                         __qdisc_run(q);
2815                 } else
2816                         qdisc_run_end(q);
2817
2818                 rc = NET_XMIT_SUCCESS;
2819         } else {
2820                 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2821                 if (qdisc_run_begin(q)) {
2822                         if (unlikely(contended)) {
2823                                 spin_unlock(&q->busylock);
2824                                 contended = false;
2825                         }
2826                         __qdisc_run(q);
2827                 }
2828         }
2829         spin_unlock(root_lock);
2830         if (unlikely(contended))
2831                 spin_unlock(&q->busylock);
2832         return rc;
2833 }
2834
2835 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2836 static void skb_update_prio(struct sk_buff *skb)
2837 {
2838         struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2839
2840         if (!skb->priority && skb->sk && map) {
2841                 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2842
2843                 if (prioidx < map->priomap_len)
2844                         skb->priority = map->priomap[prioidx];
2845         }
2846 }
2847 #else
2848 #define skb_update_prio(skb)
2849 #endif
2850
2851 static DEFINE_PER_CPU(int, xmit_recursion);
2852 #define RECURSION_LIMIT 10
2853
2854 /**
2855  *      dev_loopback_xmit - loop back @skb
2856  *      @skb: buffer to transmit
2857  */
2858 int dev_loopback_xmit(struct sk_buff *skb)
2859 {
2860         skb_reset_mac_header(skb);
2861         __skb_pull(skb, skb_network_offset(skb));
2862         skb->pkt_type = PACKET_LOOPBACK;
2863         skb->ip_summed = CHECKSUM_UNNECESSARY;
2864         WARN_ON(!skb_dst(skb));
2865         skb_dst_force(skb);
2866         netif_rx_ni(skb);
2867         return 0;
2868 }
2869 EXPORT_SYMBOL(dev_loopback_xmit);
2870
2871 /**
2872  *      __dev_queue_xmit - transmit a buffer
2873  *      @skb: buffer to transmit
2874  *      @accel_priv: private data used for L2 forwarding offload
2875  *
2876  *      Queue a buffer for transmission to a network device. The caller must
2877  *      have set the device and priority and built the buffer before calling
2878  *      this function. The function can be called from an interrupt.
2879  *
2880  *      A negative errno code is returned on a failure. A success does not
2881  *      guarantee the frame will be transmitted as it may be dropped due
2882  *      to congestion or traffic shaping.
2883  *
2884  * -----------------------------------------------------------------------------------
2885  *      I notice this method can also return errors from the queue disciplines,
2886  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2887  *      be positive.
2888  *
2889  *      Regardless of the return value, the skb is consumed, so it is currently
2890  *      difficult to retry a send to this method.  (You can bump the ref count
2891  *      before sending to hold a reference for retry if you are careful.)
2892  *
2893  *      When calling this method, interrupts MUST be enabled.  This is because
2894  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2895  *          --BLG
2896  */
2897 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2898 {
2899         struct net_device *dev = skb->dev;
2900         struct netdev_queue *txq;
2901         struct Qdisc *q;
2902         int rc = -ENOMEM;
2903
2904         skb_reset_mac_header(skb);
2905
2906         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2907                 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2908
2909         /* Disable soft irqs for various locks below. Also
2910          * stops preemption for RCU.
2911          */
2912         rcu_read_lock_bh();
2913
2914         skb_update_prio(skb);
2915
2916         /* If device/qdisc don't need skb->dst, release it right now while
2917          * it's hot in this cpu cache.
2918          */
2919         if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2920                 skb_dst_drop(skb);
2921         else
2922                 skb_dst_force(skb);
2923
2924         txq = netdev_pick_tx(dev, skb, accel_priv);
2925         q = rcu_dereference_bh(txq->qdisc);
2926
2927 #ifdef CONFIG_NET_CLS_ACT
2928         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2929 #endif
2930         trace_net_dev_queue(skb);
2931         if (q->enqueue) {
2932                 rc = __dev_xmit_skb(skb, q, dev, txq);
2933                 goto out;
2934         }
2935
2936         /* The device has no queue. Common case for software devices:
2937          * loopback, all sorts of tunnels...
2938          *
2939          * Really, it is unlikely that netif_tx_lock protection is necessary
2940          * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
2941          * counters.)
2942          * However, it is possible that they rely on the protection
2943          * made by us here.
2944          *
2945          * Check this and take the lock. It is not prone to deadlocks.
2946          * Or just use the noqueue qdisc, it is even simpler 8)
2947          */
2948         if (dev->flags & IFF_UP) {
2949                 int cpu = smp_processor_id(); /* ok because BHs are off */
2950
2951                 if (txq->xmit_lock_owner != cpu) {
2952
2953                         if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2954                                 goto recursion_alert;
2955
2956                         skb = validate_xmit_skb(skb, dev);
2957                         if (!skb)
2958                                 goto drop;
2959
2960                         HARD_TX_LOCK(dev, txq, cpu);
2961
2962                         if (!netif_xmit_stopped(txq)) {
2963                                 __this_cpu_inc(xmit_recursion);
2964                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
2965                                 __this_cpu_dec(xmit_recursion);
2966                                 if (dev_xmit_complete(rc)) {
2967                                         HARD_TX_UNLOCK(dev, txq);
2968                                         goto out;
2969                                 }
2970                         }
2971                         HARD_TX_UNLOCK(dev, txq);
2972                         net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2973                                              dev->name);
2974                 } else {
2975                         /* Recursion is detected! It is possible,
2976                          * unfortunately
2977                          */
2978 recursion_alert:
2979                         net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2980                                              dev->name);
2981                 }
2982         }
2983
2984         rc = -ENETDOWN;
2985 drop:
2986         rcu_read_unlock_bh();
2987
2988         atomic_long_inc(&dev->tx_dropped);
2989         kfree_skb_list(skb);
2990         return rc;
2991 out:
2992         rcu_read_unlock_bh();
2993         return rc;
2994 }
2995
2996 int dev_queue_xmit(struct sk_buff *skb)
2997 {
2998         return __dev_queue_xmit(skb, NULL);
2999 }
3000 EXPORT_SYMBOL(dev_queue_xmit);
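
/* Example (editor's sketch): transmitting a pre-built payload from kernel
 * code.  example_send_frame and dest_mac are hypothetical; note that
 * dev_queue_xmit() consumes the skb whether or not it succeeds.
 */
static int example_send_frame(struct net_device *dev, struct sk_buff *skb,
			      const u8 *dest_mac)
{
	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);	/* assumes an IPv4 payload */

	if (dev_hard_header(skb, dev, ETH_P_IP, dest_mac, dev->dev_addr,
			    skb->len) < 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return dev_queue_xmit(skb);
}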
3001
3002 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3003 {
3004         return __dev_queue_xmit(skb, accel_priv);
3005 }
3006 EXPORT_SYMBOL(dev_queue_xmit_accel);
3007
3008
3009 /*=======================================================================
3010                         Receiver routines
3011   =======================================================================*/
3012
3013 int netdev_max_backlog __read_mostly = 1000;
3014 EXPORT_SYMBOL(netdev_max_backlog);
3015
3016 int netdev_tstamp_prequeue __read_mostly = 1;
3017 int netdev_budget __read_mostly = 300;
3018 int weight_p __read_mostly = 64;            /* old backlog weight */
3019
3020 /* Called with irq disabled */
3021 static inline void ____napi_schedule(struct softnet_data *sd,
3022                                      struct napi_struct *napi)
3023 {
3024         list_add_tail(&napi->poll_list, &sd->poll_list);
3025         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3026 }
3027
3028 #ifdef CONFIG_RPS
3029
3030 /* One global table that all flow-based protocols share. */
3031 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3032 EXPORT_SYMBOL(rps_sock_flow_table);
3033
3034 struct static_key rps_needed __read_mostly;
3035
3036 static struct rps_dev_flow *
3037 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3038             struct rps_dev_flow *rflow, u16 next_cpu)
3039 {
3040         if (next_cpu != RPS_NO_CPU) {
3041 #ifdef CONFIG_RFS_ACCEL
3042                 struct netdev_rx_queue *rxqueue;
3043                 struct rps_dev_flow_table *flow_table;
3044                 struct rps_dev_flow *old_rflow;
3045                 u32 flow_id;
3046                 u16 rxq_index;
3047                 int rc;
3048
3049                 /* Should we steer this flow to a different hardware queue? */
3050                 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3051                     !(dev->features & NETIF_F_NTUPLE))
3052                         goto out;
3053                 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3054                 if (rxq_index == skb_get_rx_queue(skb))
3055                         goto out;
3056
3057                 rxqueue = dev->_rx + rxq_index;
3058                 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3059                 if (!flow_table)
3060                         goto out;
3061                 flow_id = skb_get_hash(skb) & flow_table->mask;
3062                 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3063                                                         rxq_index, flow_id);
3064                 if (rc < 0)
3065                         goto out;
3066                 old_rflow = rflow;
3067                 rflow = &flow_table->flows[flow_id];
3068                 rflow->filter = rc;
3069                 if (old_rflow->filter == rflow->filter)
3070                         old_rflow->filter = RPS_NO_FILTER;
3071         out:
3072 #endif
3073                 rflow->last_qtail =
3074                         per_cpu(softnet_data, next_cpu).input_queue_head;
3075         }
3076
3077         rflow->cpu = next_cpu;
3078         return rflow;
3079 }
3080
3081 /*
3082  * get_rps_cpu is called from netif_receive_skb and returns the target
3083  * CPU from the RPS map of the receiving queue for a given skb.
3084  * rcu_read_lock must be held on entry.
3085  */
3086 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3087                        struct rps_dev_flow **rflowp)
3088 {
3089         struct netdev_rx_queue *rxqueue;
3090         struct rps_map *map;
3091         struct rps_dev_flow_table *flow_table;
3092         struct rps_sock_flow_table *sock_flow_table;
3093         int cpu = -1;
3094         u16 tcpu;
3095         u32 hash;
3096
3097         if (skb_rx_queue_recorded(skb)) {
3098                 u16 index = skb_get_rx_queue(skb);
3099                 if (unlikely(index >= dev->real_num_rx_queues)) {
3100                         WARN_ONCE(dev->real_num_rx_queues > 1,
3101                                   "%s received packet on queue %u, but number "
3102                                   "of RX queues is %u\n",
3103                                   dev->name, index, dev->real_num_rx_queues);
3104                         goto done;
3105                 }
3106                 rxqueue = dev->_rx + index;
3107         } else
3108                 rxqueue = dev->_rx;
3109
3110         map = rcu_dereference(rxqueue->rps_map);
3111         if (map) {
3112                 if (map->len == 1 &&
3113                     !rcu_access_pointer(rxqueue->rps_flow_table)) {
3114                         tcpu = map->cpus[0];
3115                         if (cpu_online(tcpu))
3116                                 cpu = tcpu;
3117                         goto done;
3118                 }
3119         } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
3120                 goto done;
3121         }
3122
3123         skb_reset_network_header(skb);
3124         hash = skb_get_hash(skb);
3125         if (!hash)
3126                 goto done;
3127
3128         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3129         sock_flow_table = rcu_dereference(rps_sock_flow_table);
3130         if (flow_table && sock_flow_table) {
3131                 u16 next_cpu;
3132                 struct rps_dev_flow *rflow;
3133
3134                 rflow = &flow_table->flows[hash & flow_table->mask];
3135                 tcpu = rflow->cpu;
3136
3137                 next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
3138
3139                 /*
3140                  * If the desired CPU (where the last recvmsg was done) is
3141                  * different from the current CPU (the one in the rx-queue flow
3142                  * table entry), switch if one of the following holds:
3143                  *   - Current CPU is unset (equal to RPS_NO_CPU).
3144                  *   - Current CPU is offline.
3145                  *   - The current CPU's queue tail has advanced beyond the
3146                  *     last packet that was enqueued using this table entry.
3147                  *     This guarantees that all previous packets for the flow
3148                  *     have been dequeued, thus preserving in-order delivery.
3149                  */
3150                 if (unlikely(tcpu != next_cpu) &&
3151                     (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3152                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3153                       rflow->last_qtail)) >= 0)) {
3154                         tcpu = next_cpu;
3155                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3156                 }
3157
3158                 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3159                         *rflowp = rflow;
3160                         cpu = tcpu;
3161                         goto done;
3162                 }
3163         }
3164
3165         if (map) {
3166                 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3167                 if (cpu_online(tcpu)) {
3168                         cpu = tcpu;
3169                         goto done;
3170                 }
3171         }
3172
3173 done:
3174         return cpu;
3175 }
3176
3177 #ifdef CONFIG_RFS_ACCEL
3178
3179 /**
3180  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3181  * @dev: Device on which the filter was set
3182  * @rxq_index: RX queue index
3183  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3184  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3185  *
3186  * Drivers that implement ndo_rx_flow_steer() should periodically call
3187  * this function for each installed filter and remove the filters for
3188  * which it returns %true.
3189  */
3190 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3191                          u32 flow_id, u16 filter_id)
3192 {
3193         struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3194         struct rps_dev_flow_table *flow_table;
3195         struct rps_dev_flow *rflow;
3196         bool expire = true;
3197         int cpu;
3198
3199         rcu_read_lock();
3200         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3201         if (flow_table && flow_id <= flow_table->mask) {
3202                 rflow = &flow_table->flows[flow_id];
3203                 cpu = ACCESS_ONCE(rflow->cpu);
3204                 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3205                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3206                            rflow->last_qtail) <
3207                      (int)(10 * flow_table->mask)))
3208                         expire = false;
3209         }
3210         rcu_read_unlock();
3211         return expire;
3212 }
3213 EXPORT_SYMBOL(rps_may_expire_flow);
3214
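
/* Illustrative sketch (not part of this file): a driver that installed
 * aRFS filters via ndo_rx_flow_steer() might expire them from a periodic
 * work item roughly like this.  The foo_* names, the filter table and its
 * bookkeeping are hypothetical; only rps_may_expire_flow() is real.
 *
 *        static void foo_expire_rfs_filters(struct foo_priv *priv)
 *        {
 *                unsigned int i;
 *
 *                for (i = 0; i < priv->num_filters; i++) {
 *                        struct foo_filter *f = &priv->filters[i];
 *
 *                        if (!f->installed)
 *                                continue;
 *                        if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *                                                f->flow_id, f->filter_id)) {
 *                                foo_hw_remove_filter(priv, f);
 *                                f->installed = false;
 *                        }
 *                }
 *        }
 */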
3215 #endif /* CONFIG_RFS_ACCEL */
3216
3217 /* Called from hardirq (IPI) context */
3218 static void rps_trigger_softirq(void *data)
3219 {
3220         struct softnet_data *sd = data;
3221
3222         ____napi_schedule(sd, &sd->backlog);
3223         sd->received_rps++;
3224 }
3225
3226 #endif /* CONFIG_RPS */
3227
3228 /*
3229  * Check if this softnet_data structure belongs to another CPU.
3230  * If so, queue it on our IPI list and return 1.
3231  * Otherwise, return 0.
3232  */
3233 static int rps_ipi_queued(struct softnet_data *sd)
3234 {
3235 #ifdef CONFIG_RPS
3236         struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3237
3238         if (sd != mysd) {
3239                 sd->rps_ipi_next = mysd->rps_ipi_list;
3240                 mysd->rps_ipi_list = sd;
3241
3242                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3243                 return 1;
3244         }
3245 #endif /* CONFIG_RPS */
3246         return 0;
3247 }
3248
3249 #ifdef CONFIG_NET_FLOW_LIMIT
3250 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3251 #endif
3252
3253 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3254 {
3255 #ifdef CONFIG_NET_FLOW_LIMIT
3256         struct sd_flow_limit *fl;
3257         struct softnet_data *sd;
3258         unsigned int old_flow, new_flow;
3259
3260         if (qlen < (netdev_max_backlog >> 1))
3261                 return false;
3262
3263         sd = this_cpu_ptr(&softnet_data);
3264
3265         rcu_read_lock();
3266         fl = rcu_dereference(sd->flow_limit);
3267         if (fl) {
3268                 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3269                 old_flow = fl->history[fl->history_head];
3270                 fl->history[fl->history_head] = new_flow;
3271
3272                 fl->history_head++;
3273                 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3274
3275                 if (likely(fl->buckets[old_flow]))
3276                         fl->buckets[old_flow]--;
3277
3278                 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3279                         fl->count++;
3280                         rcu_read_unlock();
3281                         return true;
3282                 }
3283         }
3284         rcu_read_unlock();
3285 #endif
3286         return false;
3287 }
3288
3289 /*
3290  * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
3291  * queue (which may be a remote CPU's queue).
3292  */
3293 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3294                               unsigned int *qtail)
3295 {
3296         struct softnet_data *sd;
3297         unsigned long flags;
3298         unsigned int qlen;
3299
3300         sd = &per_cpu(softnet_data, cpu);
3301
3302         local_irq_save(flags);
3303
3304         rps_lock(sd);
3305         qlen = skb_queue_len(&sd->input_pkt_queue);
3306         if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3307                 if (skb_queue_len(&sd->input_pkt_queue)) {
3308 enqueue:
3309                         __skb_queue_tail(&sd->input_pkt_queue, skb);
3310                         input_queue_tail_incr_save(sd, qtail);
3311                         rps_unlock(sd);
3312                         local_irq_restore(flags);
3313                         return NET_RX_SUCCESS;
3314                 }
3315
3316                 /* Schedule NAPI for the backlog device.
3317                  * We can use a non-atomic operation since we own the queue lock.
3318                  */
3319                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3320                         if (!rps_ipi_queued(sd))
3321                                 ____napi_schedule(sd, &sd->backlog);
3322                 }
3323                 goto enqueue;
3324         }
3325
3326         sd->dropped++;
3327         rps_unlock(sd);
3328
3329         local_irq_restore(flags);
3330
3331         atomic_long_inc(&skb->dev->rx_dropped);
3332         kfree_skb(skb);
3333         return NET_RX_DROP;
3334 }
3335
3336 static int netif_rx_internal(struct sk_buff *skb)
3337 {
3338         int ret;
3339
3340         net_timestamp_check(netdev_tstamp_prequeue, skb);
3341
3342         trace_netif_rx(skb);
3343 #ifdef CONFIG_RPS
3344         if (static_key_false(&rps_needed)) {
3345                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3346                 int cpu;
3347
3348                 preempt_disable();
3349                 rcu_read_lock();
3350
3351                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3352                 if (cpu < 0)
3353                         cpu = smp_processor_id();
3354
3355                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3356
3357                 rcu_read_unlock();
3358                 preempt_enable();
3359         } else
3360 #endif
3361         {
3362                 unsigned int qtail;
3363                 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3364                 put_cpu();
3365         }
3366         return ret;
3367 }
3368
3369 /**
3370  *      netif_rx        -       post buffer to the network code
3371  *      @skb: buffer to post
3372  *
3373  *      This function receives a packet from a device driver and queues it for
3374  *      the upper (protocol) levels to process.  It always succeeds. The buffer
3375  *      may be dropped during processing for congestion control or by the
3376  *      protocol layers.
3377  *
3378  *      return values:
3379  *      NET_RX_SUCCESS  (no congestion)
3380  *      NET_RX_DROP     (packet was dropped)
3381  *
3382  */
3383
3384 int netif_rx(struct sk_buff *skb)
3385 {
3386         trace_netif_rx_entry(skb);
3387
3388         return netif_rx_internal(skb);
3389 }
3390 EXPORT_SYMBOL(netif_rx);
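
/* Illustrative sketch (not part of this file): a legacy, non-NAPI driver
 * typically posts each received frame to the stack with netif_rx() from its
 * interrupt handler.  The foo_* names are hypothetical.
 *
 *        static void foo_rx_one(struct net_device *dev, const void *data,
 *                               unsigned int len)
 *        {
 *                struct sk_buff *skb;
 *
 *                skb = netdev_alloc_skb_ip_align(dev, len);
 *                if (!skb) {
 *                        dev->stats.rx_dropped++;
 *                        return;
 *                }
 *                memcpy(skb_put(skb, len), data, len);
 *                skb->protocol = eth_type_trans(skb, dev);
 *                netif_rx(skb);
 *        }
 */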
3391
3392 int netif_rx_ni(struct sk_buff *skb)
3393 {
3394         int err;
3395
3396         trace_netif_rx_ni_entry(skb);
3397
3398         preempt_disable();
3399         err = netif_rx_internal(skb);
3400         if (local_softirq_pending())
3401                 do_softirq();
3402         preempt_enable();
3403
3404         return err;
3405 }
3406 EXPORT_SYMBOL(netif_rx_ni);
3407
3408 static void net_tx_action(struct softirq_action *h)
3409 {
3410         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3411
3412         if (sd->completion_queue) {
3413                 struct sk_buff *clist;
3414
3415                 local_irq_disable();
3416                 clist = sd->completion_queue;
3417                 sd->completion_queue = NULL;
3418                 local_irq_enable();
3419
3420                 while (clist) {
3421                         struct sk_buff *skb = clist;
3422                         clist = clist->next;
3423
3424                         WARN_ON(atomic_read(&skb->users));
3425                         if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3426                                 trace_consume_skb(skb);
3427                         else
3428                                 trace_kfree_skb(skb, net_tx_action);
3429                         __kfree_skb(skb);
3430                 }
3431         }
3432
3433         if (sd->output_queue) {
3434                 struct Qdisc *head;
3435
3436                 local_irq_disable();
3437                 head = sd->output_queue;
3438                 sd->output_queue = NULL;
3439                 sd->output_queue_tailp = &sd->output_queue;
3440                 local_irq_enable();
3441
3442                 while (head) {
3443                         struct Qdisc *q = head;
3444                         spinlock_t *root_lock;
3445
3446                         head = head->next_sched;
3447
3448                         root_lock = qdisc_lock(q);
3449                         if (spin_trylock(root_lock)) {
3450                                 smp_mb__before_atomic();
3451                                 clear_bit(__QDISC_STATE_SCHED,
3452                                           &q->state);
3453                                 qdisc_run(q);
3454                                 spin_unlock(root_lock);
3455                         } else {
3456                                 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3457                                               &q->state)) {
3458                                         __netif_reschedule(q);
3459                                 } else {
3460                                         smp_mb__before_atomic();
3461                                         clear_bit(__QDISC_STATE_SCHED,
3462                                                   &q->state);
3463                                 }
3464                         }
3465                 }
3466         }
3467 }
3468
3469 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3470     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3471 /* This hook is defined here for ATM LANE */
3472 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3473                              unsigned char *addr) __read_mostly;
3474 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3475 #endif
3476
3477 #ifdef CONFIG_NET_CLS_ACT
3478 /* TODO: Maybe we should just force sch_ingress to be compiled in
3479  * whenever CONFIG_NET_CLS_ACT is. Otherwise we pay for a few useless
3480  * instructions (a compare and two extra stores) when CONFIG_NET_CLS_ACT
3481  * is enabled but the ingress scheduler is not built in.
3482  * NOTE: This doesn't stop any functionality; if you don't have
3483  * the ingress scheduler, you just can't add policies on ingress.
3484  *
3485  */
3486 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3487 {
3488         struct net_device *dev = skb->dev;
3489         u32 ttl = G_TC_RTTL(skb->tc_verd);
3490         int result = TC_ACT_OK;
3491         struct Qdisc *q;
3492
3493         if (unlikely(MAX_RED_LOOP < ttl++)) {
3494                 net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
3495                                      skb->skb_iif, dev->ifindex);
3496                 return TC_ACT_SHOT;
3497         }
3498
3499         skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3500         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3501
3502         q = rcu_dereference(rxq->qdisc);
3503         if (q != &noop_qdisc) {
3504                 spin_lock(qdisc_lock(q));
3505                 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3506                         result = qdisc_enqueue_root(skb, q);
3507                 spin_unlock(qdisc_lock(q));
3508         }
3509
3510         return result;
3511 }
3512
3513 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3514                                          struct packet_type **pt_prev,
3515                                          int *ret, struct net_device *orig_dev)
3516 {
3517         struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3518
3519         if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
3520                 goto out;
3521
3522         if (*pt_prev) {
3523                 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3524                 *pt_prev = NULL;
3525         }
3526
3527         switch (ing_filter(skb, rxq)) {
3528         case TC_ACT_SHOT:
3529         case TC_ACT_STOLEN:
3530                 kfree_skb(skb);
3531                 return NULL;
3532         }
3533
3534 out:
3535         skb->tc_verd = 0;
3536         return skb;
3537 }
3538 #endif
3539
3540 /**
3541  *      netdev_rx_handler_register - register receive handler
3542  *      @dev: device to register a handler for
3543  *      @rx_handler: receive handler to register
3544  *      @rx_handler_data: data pointer that is used by rx handler
3545  *
3546  *      Register a receive handler for a device. This handler will then be
3547  *      called from __netif_receive_skb. A negative errno code is returned
3548  *      on a failure.
3549  *
3550  *      The caller must hold the rtnl_mutex.
3551  *
3552  *      For a general description of rx_handler, see enum rx_handler_result.
3553  */
3554 int netdev_rx_handler_register(struct net_device *dev,
3555                                rx_handler_func_t *rx_handler,
3556                                void *rx_handler_data)
3557 {
3558         ASSERT_RTNL();
3559
3560         if (dev->rx_handler)
3561                 return -EBUSY;
3562
3563         /* Note: rx_handler_data must be set before rx_handler */
3564         rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3565         rcu_assign_pointer(dev->rx_handler, rx_handler);
3566
3567         return 0;
3568 }
3569 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
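
/* Illustrative sketch (not part of this file): a virtual device such as a
 * bridge or bonding master claims a port's ingress path by registering an
 * rx_handler under RTNL.  The foo_* names are hypothetical; the handler
 * signature and return values follow enum rx_handler_result.
 *
 *        static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *        {
 *                struct sk_buff *skb = *pskb;
 *                struct foo_port *port;
 *
 *                port = rcu_dereference(skb->dev->rx_handler_data);
 *                if (!foo_port_wants(port, skb))
 *                        return RX_HANDLER_PASS;
 *
 *                foo_deliver(port, skb);
 *                return RX_HANDLER_CONSUMED;
 *        }
 *
 * and, with rtnl_lock() held:
 *
 *        err = netdev_rx_handler_register(port_dev, foo_handle_frame, port);
 */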
3570
3571 /**
3572  *      netdev_rx_handler_unregister - unregister receive handler
3573  *      @dev: device to unregister a handler from
3574  *
3575  *      Unregister a receive handler from a device.
3576  *
3577  *      The caller must hold the rtnl_mutex.
3578  */
3579 void netdev_rx_handler_unregister(struct net_device *dev)
3580 {
3581
3582         ASSERT_RTNL();
3583         RCU_INIT_POINTER(dev->rx_handler, NULL);
3584         /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3585          * section is guaranteed to see a non-NULL rx_handler_data
3586          * as well.
3587          */
3588         synchronize_net();
3589         RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3590 }
3591 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3592
3593 /*
3594  * Limit the use of PFMEMALLOC reserves to those protocols that implement
3595  * the special handling of PFMEMALLOC skbs.
3596  */
3597 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3598 {
3599         switch (skb->protocol) {
3600         case htons(ETH_P_ARP):
3601         case htons(ETH_P_IP):
3602         case htons(ETH_P_IPV6):
3603         case htons(ETH_P_8021Q):
3604         case htons(ETH_P_8021AD):
3605                 return true;
3606         default:
3607                 return false;
3608         }
3609 }
3610
3611 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3612 {
3613         struct packet_type *ptype, *pt_prev;
3614         rx_handler_func_t *rx_handler;
3615         struct net_device *orig_dev;
3616         struct net_device *null_or_dev;
3617         bool deliver_exact = false;
3618         int ret = NET_RX_DROP;
3619         __be16 type;
3620
3621         net_timestamp_check(!netdev_tstamp_prequeue, skb);
3622
3623         trace_netif_receive_skb(skb);
3624
3625         orig_dev = skb->dev;
3626
3627         skb_reset_network_header(skb);
3628         if (!skb_transport_header_was_set(skb))
3629                 skb_reset_transport_header(skb);
3630         skb_reset_mac_len(skb);
3631
3632         pt_prev = NULL;
3633
3634         rcu_read_lock();
3635
3636 another_round:
3637         skb->skb_iif = skb->dev->ifindex;
3638
3639         __this_cpu_inc(softnet_data.processed);
3640
3641         if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3642             skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3643                 skb = skb_vlan_untag(skb);
3644                 if (unlikely(!skb))
3645                         goto unlock;
3646         }
3647
3648 #ifdef CONFIG_NET_CLS_ACT
3649         if (skb->tc_verd & TC_NCLS) {
3650                 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3651                 goto ncls;
3652         }
3653 #endif
3654
3655         if (pfmemalloc)
3656                 goto skip_taps;
3657
3658         list_for_each_entry_rcu(ptype, &ptype_all, list) {
3659                 if (!ptype->dev || ptype->dev == skb->dev) {
3660                         if (pt_prev)
3661                                 ret = deliver_skb(skb, pt_prev, orig_dev);
3662                         pt_prev = ptype;
3663                 }
3664         }
3665
3666 skip_taps:
3667 #ifdef CONFIG_NET_CLS_ACT
3668         skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3669         if (!skb)
3670                 goto unlock;
3671 ncls:
3672 #endif
3673
3674         if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3675                 goto drop;
3676
3677         if (vlan_tx_tag_present(skb)) {
3678                 if (pt_prev) {
3679                         ret = deliver_skb(skb, pt_prev, orig_dev);
3680                         pt_prev = NULL;
3681                 }
3682                 if (vlan_do_receive(&skb))
3683                         goto another_round;
3684                 else if (unlikely(!skb))
3685                         goto unlock;
3686         }
3687
3688         rx_handler = rcu_dereference(skb->dev->rx_handler);
3689         if (rx_handler) {
3690                 if (pt_prev) {
3691                         ret = deliver_skb(skb, pt_prev, orig_dev);
3692                         pt_prev = NULL;
3693                 }
3694                 switch (rx_handler(&skb)) {
3695                 case RX_HANDLER_CONSUMED:
3696                         ret = NET_RX_SUCCESS;
3697                         goto unlock;
3698                 case RX_HANDLER_ANOTHER:
3699                         goto another_round;
3700                 case RX_HANDLER_EXACT:
3701                         deliver_exact = true;
3702                 case RX_HANDLER_PASS:
3703                         break;
3704                 default:
3705                         BUG();
3706                 }
3707         }
3708
3709         if (unlikely(vlan_tx_tag_present(skb))) {
3710                 if (vlan_tx_tag_get_id(skb))
3711                         skb->pkt_type = PACKET_OTHERHOST;
3712                 /* Note: we might in the future use prio bits
3713                  * and set skb->priority like in vlan_do_receive().
3714                  * For the time being, just ignore the Priority Code Point.
3715                  */
3716                 skb->vlan_tci = 0;
3717         }
3718
3719         /* deliver only exact match when indicated */
3720         null_or_dev = deliver_exact ? skb->dev : NULL;
3721
3722         type = skb->protocol;
3723         list_for_each_entry_rcu(ptype,
3724                         &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3725                 if (ptype->type == type &&
3726                     (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3727                      ptype->dev == orig_dev)) {
3728                         if (pt_prev)
3729                                 ret = deliver_skb(skb, pt_prev, orig_dev);
3730                         pt_prev = ptype;
3731                 }
3732         }
3733
3734         if (pt_prev) {
3735                 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3736                         goto drop;
3737                 else
3738                         ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3739         } else {
3740 drop:
3741                 atomic_long_inc(&skb->dev->rx_dropped);
3742                 kfree_skb(skb);
3743                 /* Jamal, now you will not be able to escape explaining
3744                  * to me how you were going to use this. :-)
3745                  */
3746                 ret = NET_RX_DROP;
3747         }
3748
3749 unlock:
3750         rcu_read_unlock();
3751         return ret;
3752 }
3753
3754 static int __netif_receive_skb(struct sk_buff *skb)
3755 {
3756         int ret;
3757
3758         if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3759                 unsigned long pflags = current->flags;
3760
3761                 /*
3762                  * PFMEMALLOC skbs are special, they should
3763                  * - be delivered to SOCK_MEMALLOC sockets only
3764                  * - stay away from userspace
3765                  * - have bounded memory usage
3766                  *
3767                  * Use PF_MEMALLOC as this saves us from propagating the allocation
3768                  * context down to all allocation sites.
3769                  */
3770                 current->flags |= PF_MEMALLOC;
3771                 ret = __netif_receive_skb_core(skb, true);
3772                 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3773         } else
3774                 ret = __netif_receive_skb_core(skb, false);
3775
3776         return ret;
3777 }
3778
3779 static int netif_receive_skb_internal(struct sk_buff *skb)
3780 {
3781         net_timestamp_check(netdev_tstamp_prequeue, skb);
3782
3783         if (skb_defer_rx_timestamp(skb))
3784                 return NET_RX_SUCCESS;
3785
3786 #ifdef CONFIG_RPS
3787         if (static_key_false(&rps_needed)) {
3788                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3789                 int cpu, ret;
3790
3791                 rcu_read_lock();
3792
3793                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3794
3795                 if (cpu >= 0) {
3796                         ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3797                         rcu_read_unlock();
3798                         return ret;
3799                 }
3800                 rcu_read_unlock();
3801         }
3802 #endif
3803         return __netif_receive_skb(skb);
3804 }
3805
3806 /**
3807  *      netif_receive_skb - process receive buffer from network
3808  *      @skb: buffer to process
3809  *
3810  *      netif_receive_skb() is the main receive data processing function.
3811  *      It always succeeds. The buffer may be dropped during processing
3812  *      for congestion control or by the protocol layers.
3813  *
3814  *      This function may only be called from softirq context and interrupts
3815  *      should be enabled.
3816  *
3817  *      Return values (usually ignored):
3818  *      NET_RX_SUCCESS: no congestion
3819  *      NET_RX_DROP: packet was dropped
3820  */
3821 int netif_receive_skb(struct sk_buff *skb)
3822 {
3823         trace_netif_receive_skb_entry(skb);
3824
3825         return netif_receive_skb_internal(skb);
3826 }
3827 EXPORT_SYMBOL(netif_receive_skb);
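
/* Illustrative sketch (not part of this file): unlike netif_rx(), which may
 * be called from hard interrupt context and only queues to a backlog,
 * netif_receive_skb() runs the full receive path and is typically called
 * from a driver's NAPI poll routine in softirq context.  The foo_* names
 * are hypothetical.
 *
 *        static int foo_poll(struct napi_struct *napi, int budget)
 *        {
 *                struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
 *                int done = 0;
 *
 *                while (done < budget) {
 *                        struct sk_buff *skb = foo_build_next_skb(ring);
 *
 *                        if (!skb)
 *                                break;
 *                        netif_receive_skb(skb);
 *                        done++;
 *                }
 *                if (done < budget)
 *                        napi_complete(napi);
 *                return done;
 *        }
 */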
3828
3829 /* Network device is going away, flush any packets still pending
3830  * Called with irqs disabled.
3831  */
3832 static void flush_backlog(void *arg)
3833 {
3834         struct net_device *dev = arg;
3835         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3836         struct sk_buff *skb, *tmp;
3837
3838         rps_lock(sd);
3839         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3840                 if (skb->dev == dev) {
3841                         __skb_unlink(skb, &sd->input_pkt_queue);
3842                         kfree_skb(skb);
3843                         input_queue_head_incr(sd);
3844                 }
3845         }
3846         rps_unlock(sd);
3847
3848         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3849                 if (skb->dev == dev) {
3850                         __skb_unlink(skb, &sd->process_queue);
3851                         kfree_skb(skb);
3852                         input_queue_head_incr(sd);
3853                 }
3854         }
3855 }
3856
3857 static int napi_gro_complete(struct sk_buff *skb)
3858 {
3859         struct packet_offload *ptype;
3860         __be16 type = skb->protocol;
3861         struct list_head *head = &offload_base;
3862         int err = -ENOENT;
3863
3864         BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3865
3866         if (NAPI_GRO_CB(skb)->count == 1) {
3867                 skb_shinfo(skb)->gso_size = 0;
3868                 goto out;
3869         }
3870
3871         rcu_read_lock();
3872         list_for_each_entry_rcu(ptype, head, list) {
3873                 if (ptype->type != type || !ptype->callbacks.gro_complete)
3874                         continue;
3875
3876                 err = ptype->callbacks.gro_complete(skb, 0);
3877                 break;
3878         }
3879         rcu_read_unlock();
3880
3881         if (err) {
3882                 WARN_ON(&ptype->list == head);
3883                 kfree_skb(skb);
3884                 return NET_RX_SUCCESS;
3885         }
3886
3887 out:
3888         return netif_receive_skb_internal(skb);
3889 }
3890
3891 /* napi->gro_list contains packets ordered by age;
3892  * the youngest packets are at the head of the list.
3893  * Complete skbs in reverse order to reduce latencies.
3894  */
3895 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3896 {
3897         struct sk_buff *skb, *prev = NULL;
3898
3899         /* scan list and build reverse chain */
3900         for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3901                 skb->prev = prev;
3902                 prev = skb;
3903         }
3904
3905         for (skb = prev; skb; skb = prev) {
3906                 skb->next = NULL;
3907
3908                 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3909                         return;
3910
3911                 prev = skb->prev;
3912                 napi_gro_complete(skb);
3913                 napi->gro_count--;
3914         }
3915
3916         napi->gro_list = NULL;
3917 }
3918 EXPORT_SYMBOL(napi_gro_flush);
3919
3920 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3921 {
3922         struct sk_buff *p;
3923         unsigned int maclen = skb->dev->hard_header_len;
3924         u32 hash = skb_get_hash_raw(skb);
3925
3926         for (p = napi->gro_list; p; p = p->next) {
3927                 unsigned long diffs;
3928
3929                 NAPI_GRO_CB(p)->flush = 0;
3930
3931                 if (hash != skb_get_hash_raw(p)) {
3932                         NAPI_GRO_CB(p)->same_flow = 0;
3933                         continue;
3934                 }
3935
3936                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3937                 diffs |= p->vlan_tci ^ skb->vlan_tci;
3938                 if (maclen == ETH_HLEN)
3939                         diffs |= compare_ether_header(skb_mac_header(p),
3940                                                       skb_mac_header(skb));
3941                 else if (!diffs)
3942                         diffs = memcmp(skb_mac_header(p),
3943                                        skb_mac_header(skb),
3944                                        maclen);
3945                 NAPI_GRO_CB(p)->same_flow = !diffs;
3946         }
3947 }
3948
3949 static void skb_gro_reset_offset(struct sk_buff *skb)
3950 {
3951         const struct skb_shared_info *pinfo = skb_shinfo(skb);
3952         const skb_frag_t *frag0 = &pinfo->frags[0];
3953
3954         NAPI_GRO_CB(skb)->data_offset = 0;
3955         NAPI_GRO_CB(skb)->frag0 = NULL;
3956         NAPI_GRO_CB(skb)->frag0_len = 0;
3957
3958         if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3959             pinfo->nr_frags &&
3960             !PageHighMem(skb_frag_page(frag0))) {
3961                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3962                 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3963         }
3964 }
3965
3966 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
3967 {
3968         struct skb_shared_info *pinfo = skb_shinfo(skb);
3969
3970         BUG_ON(skb->end - skb->tail < grow);
3971
3972         memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3973
3974         skb->data_len -= grow;
3975         skb->tail += grow;
3976
3977         pinfo->frags[0].page_offset += grow;
3978         skb_frag_size_sub(&pinfo->frags[0], grow);
3979
3980         if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
3981                 skb_frag_unref(skb, 0);
3982                 memmove(pinfo->frags, pinfo->frags + 1,
3983                         --pinfo->nr_frags * sizeof(pinfo->frags[0]));
3984         }
3985 }
3986
3987 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3988 {
3989         struct sk_buff **pp = NULL;
3990         struct packet_offload *ptype;
3991         __be16 type = skb->protocol;
3992         struct list_head *head = &offload_base;
3993         int same_flow;
3994         enum gro_result ret;
3995         int grow;
3996
3997         if (!(skb->dev->features & NETIF_F_GRO))
3998                 goto normal;
3999
4000         if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4001                 goto normal;
4002
4003         gro_list_prepare(napi, skb);
4004
4005         rcu_read_lock();
4006         list_for_each_entry_rcu(ptype, head, list) {
4007                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4008                         continue;
4009
4010                 skb_set_network_header(skb, skb_gro_offset(skb));
4011                 skb_reset_mac_len(skb);
4012                 NAPI_GRO_CB(skb)->same_flow = 0;
4013                 NAPI_GRO_CB(skb)->flush = 0;
4014                 NAPI_GRO_CB(skb)->free = 0;
4015                 NAPI_GRO_CB(skb)->udp_mark = 0;
4016
4017                 /* Setup for GRO checksum validation */
4018                 switch (skb->ip_summed) {
4019                 case CHECKSUM_COMPLETE:
4020                         NAPI_GRO_CB(skb)->csum = skb->csum;
4021                         NAPI_GRO_CB(skb)->csum_valid = 1;
4022                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4023                         break;
4024                 case CHECKSUM_UNNECESSARY:
4025                         NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4026                         NAPI_GRO_CB(skb)->csum_valid = 0;
4027                         break;
4028                 default:
4029                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4030                         NAPI_GRO_CB(skb)->csum_valid = 0;
4031                 }
4032
4033                 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4034                 break;
4035         }
4036         rcu_read_unlock();
4037
4038         if (&ptype->list == head)
4039                 goto normal;
4040
4041         same_flow = NAPI_GRO_CB(skb)->same_flow;
4042         ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4043
4044         if (pp) {
4045                 struct sk_buff *nskb = *pp;
4046
4047                 *pp = nskb->next;
4048                 nskb->next = NULL;
4049                 napi_gro_complete(nskb);
4050                 napi->gro_count--;
4051         }
4052
4053         if (same_flow)
4054                 goto ok;
4055
4056         if (NAPI_GRO_CB(skb)->flush)
4057                 goto normal;
4058
4059         if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4060                 struct sk_buff *nskb = napi->gro_list;
4061
4062                 /* locate the end of the list to select the 'oldest' flow */
4063                 while (nskb->next) {
4064                         pp = &nskb->next;
4065                         nskb = *pp;
4066                 }
4067                 *pp = NULL;
4068                 nskb->next = NULL;
4069                 napi_gro_complete(nskb);
4070         } else {
4071                 napi->gro_count++;
4072         }
4073         NAPI_GRO_CB(skb)->count = 1;
4074         NAPI_GRO_CB(skb)->age = jiffies;
4075         NAPI_GRO_CB(skb)->last = skb;
4076         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4077         skb->next = napi->gro_list;
4078         napi->gro_list = skb;
4079         ret = GRO_HELD;
4080
4081 pull:
4082         grow = skb_gro_offset(skb) - skb_headlen(skb);
4083         if (grow > 0)
4084                 gro_pull_from_frag0(skb, grow);
4085 ok:
4086         return ret;
4087
4088 normal:
4089         ret = GRO_NORMAL;
4090         goto pull;
4091 }
4092
4093 struct packet_offload *gro_find_receive_by_type(__be16 type)
4094 {
4095         struct list_head *offload_head = &offload_base;
4096         struct packet_offload *ptype;
4097
4098         list_for_each_entry_rcu(ptype, offload_head, list) {
4099                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4100                         continue;
4101                 return ptype;
4102         }
4103         return NULL;
4104 }
4105 EXPORT_SYMBOL(gro_find_receive_by_type);
4106
4107 struct packet_offload *gro_find_complete_by_type(__be16 type)
4108 {
4109         struct list_head *offload_head = &offload_base;
4110         struct packet_offload *ptype;
4111
4112         list_for_each_entry_rcu(ptype, offload_head, list) {
4113                 if (ptype->type != type || !ptype->callbacks.gro_complete)
4114                         continue;
4115                 return ptype;
4116         }
4117         return NULL;
4118 }
4119 EXPORT_SYMBOL(gro_find_complete_by_type);
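
/* Illustrative sketch (not part of this file): encapsulation offloads use
 * these lookups to hand the inner packet to the offload handler of the
 * encapsulated protocol.  "type" would be the inner protocol, for example
 * htons(ETH_P_IP); the surrounding variables are hypothetical, and the
 * lookup must run under rcu_read_lock(), as in the GRO paths above.
 *
 *        struct packet_offload *ptype;
 *        struct sk_buff **pp = NULL;
 *
 *        rcu_read_lock();
 *        ptype = gro_find_receive_by_type(type);
 *        if (ptype)
 *                pp = ptype->callbacks.gro_receive(head, skb);
 *        rcu_read_unlock();
 */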
4120
4121 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4122 {
4123         switch (ret) {
4124         case GRO_NORMAL:
4125                 if (netif_receive_skb_internal(skb))
4126                         ret = GRO_DROP;
4127                 break;
4128
4129         case GRO_DROP:
4130                 kfree_skb(skb);
4131                 break;
4132
4133         case GRO_MERGED_FREE:
4134                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4135                         kmem_cache_free(skbuff_head_cache, skb);
4136                 else
4137                         __kfree_skb(skb);
4138                 break;
4139
4140         case GRO_HELD:
4141         case GRO_MERGED:
4142                 break;
4143         }
4144
4145         return ret;
4146 }
4147
4148 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4149 {
4150         trace_napi_gro_receive_entry(skb);
4151
4152         skb_gro_reset_offset(skb);
4153
4154         return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4155 }
4156 EXPORT_SYMBOL(napi_gro_receive);
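
/* Illustrative sketch (not part of this file): a NAPI driver that wants GRO
 * feeds each completed skb to napi_gro_receive() from its poll routine
 * instead of calling netif_receive_skb() directly; compare the hypothetical
 * foo_poll() sketch after netif_receive_skb() above.  The netdev and ring
 * variables are hypothetical driver state.
 *
 *        skb->protocol = eth_type_trans(skb, netdev);
 *        napi_gro_receive(&ring->napi, skb);
 */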
4157
4158 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4159 {
4160         __skb_pull(skb, skb_headlen(skb));
4161         /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4162         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4163         skb->vlan_tci = 0;
4164         skb->dev = napi->dev;
4165         skb->skb_iif = 0;
4166         skb->encapsulation = 0;
4167         skb_shinfo(skb)->gso_type = 0;
4168         skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4169
4170         napi->skb = skb;
4171 }
4172
4173 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4174 {
4175         struct sk_buff *skb = napi->skb;
4176
4177         if (!skb) {
4178                 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
4179                 napi->skb = skb;
4180         }
4181         return skb;
4182 }
4183 EXPORT_SYMBOL(napi_get_frags);
4184
4185 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4186                                       struct sk_buff *skb,
4187                                       gro_result_t ret)
4188 {
4189         switch (ret) {
4190         case GRO_NORMAL:
4191         case GRO_HELD:
4192                 __skb_push(skb, ETH_HLEN);
4193                 skb->protocol = eth_type_trans(skb, skb->dev);
4194                 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4195                         ret = GRO_DROP;
4196                 break;
4197
4198         case GRO_DROP:
4199         case GRO_MERGED_FREE:
4200                 napi_reuse_skb(napi, skb);
4201                 break;
4202
4203         case GRO_MERGED:
4204                 break;
4205         }
4206
4207         return ret;
4208 }
4209
4210 /* The upper GRO stack assumes the network header starts at gro_offset=0.
4211  * Drivers could call both napi_gro_frags() and napi_gro_receive(),
4212  * so we copy the ethernet header into skb->data to have a common layout.
4213  */
4214 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4215 {
4216         struct sk_buff *skb = napi->skb;
4217         const struct ethhdr *eth;
4218         unsigned int hlen = sizeof(*eth);
4219
4220         napi->skb = NULL;
4221
4222         skb_reset_mac_header(skb);
4223         skb_gro_reset_offset(skb);
4224
4225         eth = skb_gro_header_fast(skb, 0);
4226         if (unlikely(skb_gro_header_hard(skb, hlen))) {
4227                 eth = skb_gro_header_slow(skb, hlen, 0);
4228                 if (unlikely(!eth)) {
4229                         napi_reuse_skb(napi, skb);
4230                         return NULL;
4231                 }
4232         } else {
4233                 gro_pull_from_frag0(skb, hlen);
4234                 NAPI_GRO_CB(skb)->frag0 += hlen;
4235                 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4236         }
4237         __skb_pull(skb, hlen);
4238
4239         /*
4240          * This works because the only protocols we care about don't require
4241          * special handling.
4242          * We'll fix it up properly in napi_frags_finish().
4243          */
4244         skb->protocol = eth->h_proto;
4245
4246         return skb;
4247 }
4248
4249 gro_result_t napi_gro_frags(struct napi_struct *napi)
4250 {
4251         struct sk_buff *skb = napi_frags_skb(napi);
4252
4253         if (!skb)
4254                 return GRO_DROP;
4255
4256         trace_napi_gro_frags_entry(skb);
4257
4258         return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4259 }
4260 EXPORT_SYMBOL(napi_gro_frags);
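
/* Illustrative sketch (not part of this file): a driver that receives into
 * pages can let the stack pull the ethernet header out of frag0 (see
 * napi_frags_skb() above) by using the napi_get_frags()/napi_gro_frags()
 * pair.  The foo_* names and the page/offset/len values are hypothetical.
 *
 *        static void foo_rx_page(struct foo_ring *ring, struct page *page,
 *                                unsigned int offset, unsigned int len)
 *        {
 *                struct sk_buff *skb = napi_get_frags(&ring->napi);
 *
 *                if (!skb) {
 *                        put_page(page);
 *                        return;
 *                }
 *                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *                                offset, len, PAGE_SIZE);
 *                napi_gro_frags(&ring->napi);
 *        }
 */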
4261
4262 /* Compute the checksum from gro_offset and return the folded value
4263  * after adding in any pseudo checksum.
4264  */
4265 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4266 {
4267         __wsum wsum;
4268         __sum16 sum;
4269
4270         wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4271
4272         /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4273         sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4274         if (likely(!sum)) {
4275                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4276                     !skb->csum_complete_sw)
4277                         netdev_rx_csum_fault(skb->dev);
4278         }
4279
4280         NAPI_GRO_CB(skb)->csum = wsum;
4281         NAPI_GRO_CB(skb)->csum_valid = 1;
4282
4283         return sum;
4284 }
4285 EXPORT_SYMBOL(__skb_gro_checksum_complete);
4286
4287 /*
4288  * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
4289  * Note: called with local irq disabled, but exits with local irq enabled.
4290  */
4291 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4292 {
4293 #ifdef CONFIG_RPS
4294         struct softnet_data *remsd = sd->rps_ipi_list;
4295
4296         if (remsd) {
4297                 sd->rps_ipi_list = NULL;
4298
4299                 local_irq_enable();
4300
4301                 /* Send pending IPIs to kick RPS processing on remote CPUs. */
4302                 while (remsd) {
4303                         struct softnet_data *next = remsd->rps_ipi_next;
4304
4305                         if (cpu_online(remsd->cpu))
4306                                 smp_call_function_single_async(remsd->cpu,
4307                                                            &remsd->csd);
4308                         remsd = next;
4309                 }
4310         } else
4311 #endif
4312                 local_irq_enable();
4313 }
4314
4315 static int process_backlog(struct napi_struct *napi, int quota)
4316 {
4317         int work = 0;
4318         struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4319
4320 #ifdef CONFIG_RPS
4321         /* Check if we have pending IPIs; it is better to send them now
4322          * rather than waiting for net_rx_action() to end.
4323          */
4324         if (sd->rps_ipi_list) {
4325                 local_irq_disable();
4326                 net_rps_action_and_irq_enable(sd);
4327         }
4328 #endif
4329         napi->weight = weight_p;
4330         local_irq_disable();
4331         while (1) {
4332                 struct sk_buff *skb;
4333
4334                 while ((skb = __skb_dequeue(&sd->process_queue))) {
4335                         local_irq_enable();
4336                         __netif_receive_skb(skb);
4337                         local_irq_disable();
4338                         input_queue_head_incr(sd);
4339                         if (++work >= quota) {
4340                                 local_irq_enable();
4341                                 return work;
4342                         }
4343                 }
4344
4345                 rps_lock(sd);
4346                 if (skb_queue_empty(&sd->input_pkt_queue)) {
4347                         /*
4348                          * Inline a custom version of __napi_complete().
4349                          * Only the current CPU owns and manipulates this napi,
4350                          * and NAPI_STATE_SCHED is the only possible flag set
4351                          * on the backlog.
4352                          * We can use a plain write instead of clear_bit(),
4353                          * and we don't need an smp_mb() memory barrier.
4354                          */
4355                         list_del(&napi->poll_list);
4356                         napi->state = 0;
4357                         rps_unlock(sd);
4358
4359                         break;
4360                 }
4361
4362                 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4363                                            &sd->process_queue);
4364                 rps_unlock(sd);
4365         }
4366         local_irq_enable();
4367
4368         return work;
4369 }
4370
4371 /**
4372  * __napi_schedule - schedule for receive
4373  * @n: entry to schedule
4374  *
4375  * The entry's receive function will be scheduled to run
4376  */
4377 void __napi_schedule(struct napi_struct *n)
4378 {
4379         unsigned long flags;
4380
4381         local_irq_save(flags);
4382         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4383         local_irq_restore(flags);
4384 }
4385 EXPORT_SYMBOL(__napi_schedule);
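
/* Illustrative sketch (not part of this file): the usual caller is a
 * driver's interrupt handler, which masks its RX interrupt and defers the
 * rest of the work to NAPI.  Most drivers use the napi_schedule() wrapper,
 * which combines napi_schedule_prep() with __napi_schedule().  The foo_*
 * names are hypothetical.
 *
 *        static irqreturn_t foo_interrupt(int irq, void *data)
 *        {
 *                struct foo_ring *ring = data;
 *
 *                if (napi_schedule_prep(&ring->napi)) {
 *                        foo_mask_rx_irq(ring);
 *                        __napi_schedule(&ring->napi);
 *                }
 *                return IRQ_HANDLED;
 *        }
 */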
4386
4387 void __napi_complete(struct napi_struct *n)
4388 {
4389         BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4390         BUG_ON(n->gro_list);
4391
4392         list_del(&n->poll_list);
4393         smp_mb__before_atomic();
4394         clear_bit(NAPI_STATE_SCHED, &n->state);
4395 }
4396 EXPORT_SYMBOL(__napi_complete);
4397
4398 void napi_complete(struct napi_struct *n)
4399 {
4400         unsigned long flags;
4401
4402         /*
4403          * Don't let napi dequeue from the CPU poll list,
4404          * just in case it's running on a different CPU.
4405          */
4406         if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4407                 return;
4408
4409         napi_gro_flush(n, false);
4410         local_irq_save(flags);
4411         __napi_complete(n);
4412         local_irq_restore(flags);
4413 }
4414 EXPORT_SYMBOL(napi_complete);
4415
4416 /* must be called under rcu_read_lock(), as we dont take a reference */
4417 struct napi_struct *napi_by_id(unsigned int napi_id)
4418 {
4419         unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4420         struct napi_struct *napi;
4421
4422         hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4423                 if (napi->napi_id == napi_id)
4424                         return napi;
4425
4426         return NULL;
4427 }
4428 EXPORT_SYMBOL_GPL(napi_by_id);
4429
4430 void napi_hash_add(struct napi_struct *napi)
4431 {
4432         if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4433
4434                 spin_lock(&napi_hash_lock);
4435
4436                 /* 0 is not a valid id, and we also skip an id that is already
4437                  * taken; we expect both events to be extremely rare.
4438                  */
4439                 napi->napi_id = 0;
4440                 while (!napi->napi_id) {
4441                         napi->napi_id = ++napi_gen_id;
4442                         if (napi_by_id(napi->napi_id))
4443                                 napi->napi_id = 0;
4444                 }
4445
4446                 hlist_add_head_rcu(&napi->napi_hash_node,
4447                         &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4448
4449                 spin_unlock(&napi_hash_lock);
4450         }
4451 }
4452 EXPORT_SYMBOL_GPL(napi_hash_add);
4453
4454 /* Warning: the caller is responsible for making sure an RCU grace period
4455  * has elapsed before freeing the memory containing @napi.
4456  */
4457 void napi_hash_del(struct napi_struct *napi)
4458 {
4459         spin_lock(&napi_hash_lock);
4460
4461         if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4462                 hlist_del_rcu(&napi->napi_hash_node);
4463
4464         spin_unlock(&napi_hash_lock);
4465 }
4466 EXPORT_SYMBOL_GPL(napi_hash_del);
4467
4468 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4469                     int (*poll)(struct napi_struct *, int), int weight)
4470 {
4471         INIT_LIST_HEAD(&napi->poll_list);
4472         napi->gro_count = 0;
4473         napi->gro_list = NULL;
4474         napi->skb = NULL;
4475         napi->poll = poll;
4476         if (weight > NAPI_POLL_WEIGHT)
4477                 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4478                             weight, dev->name);
4479         napi->weight = weight;
4480         list_add(&napi->dev_list, &dev->napi_list);
4481         napi->dev = dev;
4482 #ifdef CONFIG_NETPOLL
4483         spin_lock_init(&napi->poll_lock);
4484         napi->poll_owner = -1;
4485 #endif
4486         set_bit(NAPI_STATE_SCHED, &napi->state);
4487 }
4488 EXPORT_SYMBOL(netif_napi_add);
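
/* Illustrative sketch (not part of this file): a driver registers its poll
 * routine once (usually at probe time) and enables it from ndo_open.  The
 * foo_* and ring names are hypothetical; NAPI_POLL_WEIGHT is the usual
 * weight.
 *
 *        netif_napi_add(netdev, &ring->napi, foo_poll, NAPI_POLL_WEIGHT);
 *        ...
 *        napi_enable(&ring->napi);
 *
 * and the reverse on teardown:
 *
 *        napi_disable(&ring->napi);
 *        netif_napi_del(&ring->napi);
 */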
4489
4490 void netif_napi_del(struct napi_struct *napi)
4491 {
4492         list_del_init(&napi->dev_list);
4493         napi_free_frags(napi);
4494
4495         kfree_skb_list(napi->gro_list);
4496         napi->gro_list = NULL;
4497         napi->gro_count = 0;
4498 }
4499 EXPORT_SYMBOL(netif_napi_del);
4500
4501 static void net_rx_action(struct softirq_action *h)
4502 {
4503         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4504         unsigned long time_limit = jiffies + 2;
4505         int budget = netdev_budget;
4506         void *have;
4507
4508         local_irq_disable();
4509
4510         while (!list_empty(&sd->poll_list)) {
4511                 struct napi_struct *n;
4512                 int work, weight;
4513
4514                 /* If the softirq window is exhausted then punt.
4515                  * Allow this to run for 2 jiffies, which allows
4516                  * an average latency of 1.5/HZ.
4517                  */
4518                 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4519                         goto softnet_break;
4520
4521                 local_irq_enable();
4522
4523                 /* Even though interrupts have been re-enabled, this
4524                  * access is safe because interrupts can only add new
4525                  * entries to the tail of this list, and only ->poll()
4526                  * calls can remove this head entry from the list.
4527                  */
4528                 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
4529
4530                 have = netpoll_poll_lock(n);
4531
4532                 weight = n->weight;
4533
4534                 /* This NAPI_STATE_SCHED test is for avoiding a race
4535                  * with netpoll's poll_napi().  Only the entity which
4536                  * obtains the lock and sees NAPI_STATE_SCHED set will
4537                  * actually make the ->poll() call.  Therefore we avoid
4538                  * accidentally calling ->poll() when NAPI is not scheduled.
4539                  */
4540                 work = 0;
4541                 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4542                         work = n->poll(n, weight);
4543                         trace_napi_poll(n);
4544                 }
4545
4546                 WARN_ON_ONCE(work > weight);
4547
4548                 budget -= work;
4549
4550                 local_irq_disable();
4551
4552                 /* Drivers must not modify the NAPI state if they
4553                  * consume the entire weight.  In such cases this code
4554                  * still "owns" the NAPI instance and therefore can
4555                  * move the instance around on the list at-will.
4556                  */
4557                 if (unlikely(work == weight)) {
4558                         if (unlikely(napi_disable_pending(n))) {
4559                                 local_irq_enable();
4560                                 napi_complete(n);
4561                                 local_irq_disable();
4562                         } else {
4563                                 if (n->gro_list) {
4564                                         /* flush too old packets
4565                                          * If HZ < 1000, flush all packets.
4566                                          */
4567                                         local_irq_enable();
4568                                         napi_gro_flush(n, HZ >= 1000);
4569                                         local_irq_disable();
4570                                 }
4571                                 list_move_tail(&n->poll_list, &sd->poll_list);
4572                         }
4573                 }
4574
4575                 netpoll_poll_unlock(have);
4576         }
4577 out:
4578         net_rps_action_and_irq_enable(sd);
4579
4580         return;
4581
4582 softnet_break:
4583         sd->time_squeeze++;
4584         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4585         goto out;
4586 }
4587
4588 struct netdev_adjacent {
4589         struct net_device *dev;
4590
4591         /* upper master flag; there can only be one master device per list */
4592         bool master;
4593
4594         /* counter for the number of times this device was added to us */
4595         u16 ref_nr;
4596
4597         /* private field for the users */
4598         void *private;
4599
4600         struct list_head list;
4601         struct rcu_head rcu;
4602 };
4603
4604 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4605                                                  struct net_device *adj_dev,
4606                                                  struct list_head *adj_list)
4607 {
4608         struct netdev_adjacent *adj;
4609
4610         list_for_each_entry(adj, adj_list, list) {
4611                 if (adj->dev == adj_dev)
4612                         return adj;
4613         }
4614         return NULL;
4615 }
4616
4617 /**
4618  * netdev_has_upper_dev - Check if device is linked to an upper device
4619  * @dev: device
4620  * @upper_dev: upper device to check
4621  *
4622  * Find out if a device is linked to the specified upper device and return true
4623  * if it is. Note that this checks only the immediate upper device, not
4624  * the complete stack of devices. The caller must hold the RTNL lock.
4625  */
4626 bool netdev_has_upper_dev(struct net_device *dev,
4627                           struct net_device *upper_dev)
4628 {
4629         ASSERT_RTNL();
4630
4631         return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
4632 }
4633 EXPORT_SYMBOL(netdev_has_upper_dev);
4634
4635 /**
4636  * netdev_has_any_upper_dev - Check if device is linked to some device
4637  * @dev: device
4638  *
4639  * Find out if a device is linked to an upper device and return true in case
4640  * it is. The caller must hold the RTNL lock.
4641  */
4642 static bool netdev_has_any_upper_dev(struct net_device *dev)
4643 {
4644         ASSERT_RTNL();
4645
4646         return !list_empty(&dev->all_adj_list.upper);
4647 }
4648
4649 /**
4650  * netdev_master_upper_dev_get - Get master upper device
4651  * @dev: device
4652  *
4653  * Find a master upper device and return a pointer to it, or NULL if
4654  * there is none. The caller must hold the RTNL lock.
4655  */
4656 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4657 {
4658         struct netdev_adjacent *upper;
4659
4660         ASSERT_RTNL();
4661
4662         if (list_empty(&dev->adj_list.upper))
4663                 return NULL;
4664
4665         upper = list_first_entry(&dev->adj_list.upper,
4666                                  struct netdev_adjacent, list);
4667         if (likely(upper->master))
4668                 return upper->dev;
4669         return NULL;
4670 }
4671 EXPORT_SYMBOL(netdev_master_upper_dev_get);
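/*
 * Illustrative usage sketch (an assumption, not taken from this file): a
 * stacking driver might query the adjacency lists above like this, with
 * RTNL held. "bond_dev" and "port_dev" are hypothetical devices used
 * purely for illustration.
 *
 *	struct net_device *master;
 *
 *	ASSERT_RTNL();
 *	if (netdev_has_upper_dev(port_dev, bond_dev))
 *		pr_debug("%s is an upper dev of %s\n",
 *			 bond_dev->name, port_dev->name);
 *	master = netdev_master_upper_dev_get(port_dev);
 *	if (master)
 *		pr_debug("%s is enslaved to %s\n",
 *			 port_dev->name, master->name);
 */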
4672
4673 void *netdev_adjacent_get_private(struct list_head *adj_list)
4674 {
4675         struct netdev_adjacent *adj;
4676
4677         adj = list_entry(adj_list, struct netdev_adjacent, list);
4678
4679         return adj->private;
4680 }
4681 EXPORT_SYMBOL(netdev_adjacent_get_private);
4682
4683 /**
4684  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4685  * @dev: device
4686  * @iter: list_head ** of the current position
4687  *
4688  * Gets the next device from the dev's upper list, starting from iter
4689  * position. The caller must hold RCU read lock.
4690  */
4691 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4692                                                  struct list_head **iter)
4693 {
4694         struct netdev_adjacent *upper;
4695
4696         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4697
4698         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4699
4700         if (&upper->list == &dev->adj_list.upper)
4701                 return NULL;
4702
4703         *iter = &upper->list;
4704
4705         return upper->dev;
4706 }
4707 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
4708
4709 /**
4710  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4711  * @dev: device
4712  * @iter: list_head ** of the current position
4713  *
4714  * Gets the next device from the dev's upper list, starting from iter
4715  * position. The caller must hold RCU read lock.
4716  */
4717 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4718                                                      struct list_head **iter)
4719 {
4720         struct netdev_adjacent *upper;
4721
4722         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4723
4724         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4725
4726         if (&upper->list == &dev->all_adj_list.upper)
4727                 return NULL;
4728
4729         *iter = &upper->list;
4730
4731         return upper->dev;
4732 }
4733 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
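/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * walking every upper device, direct or nested, under the RCU read lock.
 * The iterator starts at the list head and is advanced by the helper; the
 * netdev_for_each_all_upper_dev_rcu() macro in netdevice.h wraps the same
 * pattern.
 *
 *	struct net_device *upper;
 *	struct list_head *iter = &dev->all_adj_list.upper;
 *
 *	rcu_read_lock();
 *	while ((upper = netdev_all_upper_get_next_dev_rcu(dev, &iter)))
 *		pr_debug("upper of %s: %s\n", dev->name, upper->name);
 *	rcu_read_unlock();
 */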
4734
4735 /**
4736  * netdev_lower_get_next_private - Get the next ->private from the
4737  *                                 lower neighbour list
4738  * @dev: device
4739  * @iter: list_head ** of the current position
4740  *
4741  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4742  * list, starting from iter position. The caller must either hold the
4743  * RTNL lock or its own locking that guarantees that the neighbour lower
4744  * list will remain unchanged.
4745  */
4746 void *netdev_lower_get_next_private(struct net_device *dev,
4747                                     struct list_head **iter)
4748 {
4749         struct netdev_adjacent *lower;
4750
4751         lower = list_entry(*iter, struct netdev_adjacent, list);
4752
4753         if (&lower->list == &dev->adj_list.lower)
4754                 return NULL;
4755
4756         *iter = lower->list.next;
4757
4758         return lower->private;
4759 }
4760 EXPORT_SYMBOL(netdev_lower_get_next_private);
4761
4762 /**
4763  * netdev_lower_get_next_private_rcu - Get the next ->private from the
4764  *                                     lower neighbour list, RCU
4765  *                                     variant
4766  * @dev: device
4767  * @iter: list_head ** of the current position
4768  *
4769  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4770  * list, starting from iter position. The caller must hold RCU read lock.
4771  */
4772 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4773                                         struct list_head **iter)
4774 {
4775         struct netdev_adjacent *lower;
4776
4777         WARN_ON_ONCE(!rcu_read_lock_held());
4778
4779         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4780
4781         if (&lower->list == &dev->adj_list.lower)
4782                 return NULL;
4783
4784         *iter = &lower->list;
4785
4786         return lower->private;
4787 }
4788 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4789
4790 /**
4791  * netdev_lower_get_next - Get the next device from the lower neighbour
4792  *                         list
4793  * @dev: device
4794  * @iter: list_head ** of the current position
4795  *
4796  * Gets the next device from the dev's lower neighbour
4797  * list, starting from iter position. The caller must hold the RTNL lock or
4798  * its own locking that guarantees that the neighbour lower
4799  * list will remain unchanged.
4800  */
4801 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4802 {
4803         struct netdev_adjacent *lower;
4804
4805         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4806
4807         if (&lower->list == &dev->adj_list.lower)
4808                 return NULL;
4809
4810         *iter = &lower->list;
4811
4812         return lower->dev;
4813 }
4814 EXPORT_SYMBOL(netdev_lower_get_next);
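/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * iterating the direct lower devices with the netdev_for_each_lower_dev()
 * macro, which is built on netdev_lower_get_next() above;
 * dev_get_nest_level() further down uses the same pattern. RTNL (or
 * equivalent locking) must be held.
 *
 *	struct net_device *lower;
 *	struct list_head *iter;
 *
 *	ASSERT_RTNL();
 *	netdev_for_each_lower_dev(dev, lower, iter)
 *		pr_debug("lower of %s: %s\n", dev->name, lower->name);
 */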
4815
4816 /**
4817  * netdev_lower_get_first_private_rcu - Get the first ->private from the
4818  *                                     lower neighbour list, RCU
4819  *                                     variant
4820  * @dev: device
4821  *
4822  * Gets the first netdev_adjacent->private from the dev's lower neighbour
4823  * list. The caller must hold RCU read lock.
4824  */
4825 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4826 {
4827         struct netdev_adjacent *lower;
4828
4829         lower = list_first_or_null_rcu(&dev->adj_list.lower,
4830                         struct netdev_adjacent, list);
4831         if (lower)
4832                 return lower->private;
4833         return NULL;
4834 }
4835 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4836
4837 /**
4838  * netdev_master_upper_dev_get_rcu - Get master upper device
4839  * @dev: device
4840  *
4841  * Find a master upper device and return a pointer to it, or NULL if
4842  * there is none. The caller must hold the RCU read lock.
4843  */
4844 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4845 {
4846         struct netdev_adjacent *upper;
4847
4848         upper = list_first_or_null_rcu(&dev->adj_list.upper,
4849                                        struct netdev_adjacent, list);
4850         if (upper && likely(upper->master))
4851                 return upper->dev;
4852         return NULL;
4853 }
4854 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4855
4856 static int netdev_adjacent_sysfs_add(struct net_device *dev,
4857                               struct net_device *adj_dev,
4858                               struct list_head *dev_list)
4859 {
4860         char linkname[IFNAMSIZ+7];
4861         sprintf(linkname, dev_list == &dev->adj_list.upper ?
4862                 "upper_%s" : "lower_%s", adj_dev->name);
4863         return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4864                                  linkname);
4865 }
4866 static void netdev_adjacent_sysfs_del(struct net_device *dev,
4867                                char *name,
4868                                struct list_head *dev_list)
4869 {
4870         char linkname[IFNAMSIZ+7];
4871         sprintf(linkname, dev_list == &dev->adj_list.upper ?
4872                 "upper_%s" : "lower_%s", name);
4873         sysfs_remove_link(&(dev->dev.kobj), linkname);
4874 }
4875
4876 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
4877                                                  struct net_device *adj_dev,
4878                                                  struct list_head *dev_list)
4879 {
4880         return (dev_list == &dev->adj_list.upper ||
4881                 dev_list == &dev->adj_list.lower) &&
4882                 net_eq(dev_net(dev), dev_net(adj_dev));
4883 }
4884
4885 static int __netdev_adjacent_dev_insert(struct net_device *dev,
4886                                         struct net_device *adj_dev,
4887                                         struct list_head *dev_list,
4888                                         void *private, bool master)
4889 {
4890         struct netdev_adjacent *adj;
4891         int ret;
4892
4893         adj = __netdev_find_adj(dev, adj_dev, dev_list);
4894
4895         if (adj) {
4896                 adj->ref_nr++;
4897                 return 0;
4898         }
4899
4900         adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4901         if (!adj)
4902                 return -ENOMEM;
4903
4904         adj->dev = adj_dev;
4905         adj->master = master;
4906         adj->ref_nr = 1;
4907         adj->private = private;
4908         dev_hold(adj_dev);
4909
4910         pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4911                  adj_dev->name, dev->name, adj_dev->name);
4912
4913         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
4914                 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
4915                 if (ret)
4916                         goto free_adj;
4917         }
4918
4919         /* Ensure that master link is always the first item in list. */
4920         if (master) {
4921                 ret = sysfs_create_link(&(dev->dev.kobj),
4922                                         &(adj_dev->dev.kobj), "master");
4923                 if (ret)
4924                         goto remove_symlinks;
4925
4926                 list_add_rcu(&adj->list, dev_list);
4927         } else {
4928                 list_add_tail_rcu(&adj->list, dev_list);
4929         }
4930
4931         return 0;
4932
4933 remove_symlinks:
4934         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
4935                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
4936 free_adj:
4937         kfree(adj);
4938         dev_put(adj_dev);
4939
4940         return ret;
4941 }
4942
4943 static void __netdev_adjacent_dev_remove(struct net_device *dev,
4944                                          struct net_device *adj_dev,
4945                                          struct list_head *dev_list)
4946 {
4947         struct netdev_adjacent *adj;
4948
4949         adj = __netdev_find_adj(dev, adj_dev, dev_list);
4950
4951         if (!adj) {
4952                 pr_err("tried to remove device %s from %s\n",
4953                        dev->name, adj_dev->name);
4954                 BUG();
4955         }
4956
4957         if (adj->ref_nr > 1) {
4958                 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
4959                          adj->ref_nr-1);
4960                 adj->ref_nr--;
4961                 return;
4962         }
4963
4964         if (adj->master)
4965                 sysfs_remove_link(&(dev->dev.kobj), "master");
4966
4967         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
4968                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
4969
4970         list_del_rcu(&adj->list);
4971         pr_debug("dev_put for %s, because link removed from %s to %s\n",
4972                  adj_dev->name, dev->name, adj_dev->name);
4973         dev_put(adj_dev);
4974         kfree_rcu(adj, rcu);
4975 }
4976
4977 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
4978                                             struct net_device *upper_dev,
4979                                             struct list_head *up_list,
4980                                             struct list_head *down_list,
4981                                             void *private, bool master)
4982 {
4983         int ret;
4984
4985         ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
4986                                            master);
4987         if (ret)
4988                 return ret;
4989
4990         ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
4991                                            false);
4992         if (ret) {
4993                 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4994                 return ret;
4995         }
4996
4997         return 0;
4998 }
4999
5000 static int __netdev_adjacent_dev_link(struct net_device *dev,
5001                                       struct net_device *upper_dev)
5002 {
5003         return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5004                                                 &dev->all_adj_list.upper,
5005                                                 &upper_dev->all_adj_list.lower,
5006                                                 NULL, false);
5007 }
5008
5009 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5010                                                struct net_device *upper_dev,
5011                                                struct list_head *up_list,
5012                                                struct list_head *down_list)
5013 {
5014         __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5015         __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5016 }
5017
5018 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5019                                          struct net_device *upper_dev)
5020 {
5021         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5022                                            &dev->all_adj_list.upper,
5023                                            &upper_dev->all_adj_list.lower);
5024 }
5025
5026 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5027                                                 struct net_device *upper_dev,
5028                                                 void *private, bool master)
5029 {
5030         int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5031
5032         if (ret)
5033                 return ret;
5034
5035         ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5036                                                &dev->adj_list.upper,
5037                                                &upper_dev->adj_list.lower,
5038                                                private, master);
5039         if (ret) {
5040                 __netdev_adjacent_dev_unlink(dev, upper_dev);
5041                 return ret;
5042         }
5043
5044         return 0;
5045 }
5046
5047 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5048                                                    struct net_device *upper_dev)
5049 {
5050         __netdev_adjacent_dev_unlink(dev, upper_dev);
5051         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5052                                            &dev->adj_list.upper,
5053                                            &upper_dev->adj_list.lower);
5054 }
5055
5056 static int __netdev_upper_dev_link(struct net_device *dev,
5057                                    struct net_device *upper_dev, bool master,
5058                                    void *private)
5059 {
5060         struct netdev_adjacent *i, *j, *to_i, *to_j;
5061         int ret = 0;
5062
5063         ASSERT_RTNL();
5064
5065         if (dev == upper_dev)
5066                 return -EBUSY;
5067
5068         /* To prevent loops, check if dev is not upper device to upper_dev. */
5069         if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
5070                 return -EBUSY;
5071
5072         if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
5073                 return -EEXIST;
5074
5075         if (master && netdev_master_upper_dev_get(dev))
5076                 return -EBUSY;
5077
5078         ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5079                                                    master);
5080         if (ret)
5081                 return ret;
5082
5083         /* Now that we have linked these devs, make all of upper_dev's
5084          * all_adj_list.upper visible to every dev's all_adj_list.lower and
5085          * vice versa, and don't forget the devices themselves. All of these
5086          * links are non-neighbours.
5087          */
5088         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5089                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5090                         pr_debug("Interlinking %s with %s, non-neighbour\n",
5091                                  i->dev->name, j->dev->name);
5092                         ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5093                         if (ret)
5094                                 goto rollback_mesh;
5095                 }
5096         }
5097
5098         /* link dev to each of upper_dev's upper devices */
5099         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5100                 pr_debug("linking %s's upper device %s with %s\n",
5101                          upper_dev->name, i->dev->name, dev->name);
5102                 ret = __netdev_adjacent_dev_link(dev, i->dev);
5103                 if (ret)
5104                         goto rollback_upper_mesh;
5105         }
5106
5107         /* link each of dev's lower devices to upper_dev */
5108         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5109                 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5110                          i->dev->name, upper_dev->name);
5111                 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5112                 if (ret)
5113                         goto rollback_lower_mesh;
5114         }
5115
5116         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5117         return 0;
5118
5119 rollback_lower_mesh:
5120         to_i = i;
5121         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5122                 if (i == to_i)
5123                         break;
5124                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5125         }
5126
5127         i = NULL;
5128
5129 rollback_upper_mesh:
5130         to_i = i;
5131         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5132                 if (i == to_i)
5133                         break;
5134                 __netdev_adjacent_dev_unlink(dev, i->dev);
5135         }
5136
5137         i = j = NULL;
5138
5139 rollback_mesh:
5140         to_i = i;
5141         to_j = j;
5142         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5143                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5144                         if (i == to_i && j == to_j)
5145                                 break;
5146                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5147                 }
5148                 if (i == to_i)
5149                         break;
5150         }
5151
5152         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5153
5154         return ret;
5155 }
5156
5157 /**
5158  * netdev_upper_dev_link - Add a link to the upper device
5159  * @dev: device
5160  * @upper_dev: new upper device
5161  *
5162  * Adds a link to a device which is upper to this one. The caller must hold
5163  * the RTNL lock. On a failure a negative errno code is returned.
5164  * On success the reference counts are adjusted and the function
5165  * returns zero.
5166  */
5167 int netdev_upper_dev_link(struct net_device *dev,
5168                           struct net_device *upper_dev)
5169 {
5170         return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
5171 }
5172 EXPORT_SYMBOL(netdev_upper_dev_link);
5173
5174 /**
5175  * netdev_master_upper_dev_link - Add a master link to the upper device
5176  * @dev: device
5177  * @upper_dev: new upper device
5178  *
5179  * Adds a link to a device which is upper to this one. In this case, only
5180  * one master upper device can be linked, although other non-master devices
5181  * might be linked as well. The caller must hold the RTNL lock.
5182  * On a failure a negative errno code is returned. On success the reference
5183  * counts are adjusted and the function returns zero.
5184  */
5185 int netdev_master_upper_dev_link(struct net_device *dev,
5186                                  struct net_device *upper_dev)
5187 {
5188         return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
5189 }
5190 EXPORT_SYMBOL(netdev_master_upper_dev_link);
5191
5192 int netdev_master_upper_dev_link_private(struct net_device *dev,
5193                                          struct net_device *upper_dev,
5194                                          void *private)
5195 {
5196         return __netdev_upper_dev_link(dev, upper_dev, true, private);
5197 }
5198 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5199
5200 /**
5201  * netdev_upper_dev_unlink - Removes a link to upper device
5202  * @dev: device
5203  * @upper_dev: upper device to unlink
5204  *
5205  * Removes a link to device which is upper to this one. The caller must hold
5206  * the RTNL lock.
5207  */
5208 void netdev_upper_dev_unlink(struct net_device *dev,
5209                              struct net_device *upper_dev)
5210 {
5211         struct netdev_adjacent *i, *j;
5212         ASSERT_RTNL();
5213
5214         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5215
5216         /* Here is the tricky part. We must remove all dev's lower
5217          * devices from all upper_dev's upper devices and vice
5218          * versa, to maintain the graph relationship.
5219          */
5220         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5221                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5222                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5223
5224         /* also remove the devices themselves from the lower/upper device
5225          * lists
5226          */
5227         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5228                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5229
5230         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5231                 __netdev_adjacent_dev_unlink(dev, i->dev);
5232
5233         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5234 }
5235 EXPORT_SYMBOL(netdev_upper_dev_unlink);
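/*
 * Illustrative usage sketch (an assumption, not taken from this file): a
 * master/slave style driver pairs the link and unlink calls above, both
 * under RTNL. "master_dev" and "slave_dev" are hypothetical.
 *
 *	err = netdev_master_upper_dev_link(slave_dev, master_dev);
 *	if (err)
 *		return err;
 *	...
 *	netdev_upper_dev_unlink(slave_dev, master_dev);
 */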
5236
5237 void netdev_adjacent_add_links(struct net_device *dev)
5238 {
5239         struct netdev_adjacent *iter;
5240
5241         struct net *net = dev_net(dev);
5242
5243         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5244                 if (!net_eq(net, dev_net(iter->dev)))
5245                         continue;
5246                 netdev_adjacent_sysfs_add(iter->dev, dev,
5247                                           &iter->dev->adj_list.lower);
5248                 netdev_adjacent_sysfs_add(dev, iter->dev,
5249                                           &dev->adj_list.upper);
5250         }
5251
5252         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5253                 if (!net_eq(net, dev_net(iter->dev)))
5254                         continue;
5255                 netdev_adjacent_sysfs_add(iter->dev, dev,
5256                                           &iter->dev->adj_list.upper);
5257                 netdev_adjacent_sysfs_add(dev, iter->dev,
5258                                           &dev->adj_list.lower);
5259         }
5260 }
5261
5262 void netdev_adjacent_del_links(struct net_device *dev)
5263 {
5264         struct netdev_adjacent *iter;
5265
5266         struct net *net = dev_net(dev);
5267
5268         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5269                 if (!net_eq(net, dev_net(iter->dev)))
5270                         continue;
5271                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5272                                           &iter->dev->adj_list.lower);
5273                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5274                                           &dev->adj_list.upper);
5275         }
5276
5277         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5278                 if (!net_eq(net, dev_net(iter->dev)))
5279                         continue;
5280                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5281                                           &iter->dev->adj_list.upper);
5282                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5283                                           &dev->adj_list.lower);
5284         }
5285 }
5286
5287 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5288 {
5289         struct netdev_adjacent *iter;
5290
5291         struct net *net = dev_net(dev);
5292
5293         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5294                 if (!net_eq(net, dev_net(iter->dev)))
5295                         continue;
5296                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5297                                           &iter->dev->adj_list.lower);
5298                 netdev_adjacent_sysfs_add(iter->dev, dev,
5299                                           &iter->dev->adj_list.lower);
5300         }
5301
5302         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5303                 if (!net_eq(net, dev_net(iter->dev)))
5304                         continue;
5305                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5306                                           &iter->dev->adj_list.upper);
5307                 netdev_adjacent_sysfs_add(iter->dev, dev,
5308                                           &iter->dev->adj_list.upper);
5309         }
5310 }
5311
5312 void *netdev_lower_dev_get_private(struct net_device *dev,
5313                                    struct net_device *lower_dev)
5314 {
5315         struct netdev_adjacent *lower;
5316
5317         if (!lower_dev)
5318                 return NULL;
5319         lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5320         if (!lower)
5321                 return NULL;
5322
5323         return lower->private;
5324 }
5325 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5326
5327
5328 int dev_get_nest_level(struct net_device *dev,
5329                        bool (*type_check)(struct net_device *dev))
5330 {
5331         struct net_device *lower = NULL;
5332         struct list_head *iter;
5333         int max_nest = -1;
5334         int nest;
5335
5336         ASSERT_RTNL();
5337
5338         netdev_for_each_lower_dev(dev, lower, iter) {
5339                 nest = dev_get_nest_level(lower, type_check);
5340                 if (max_nest < nest)
5341                         max_nest = nest;
5342         }
5343
5344         if (type_check(dev))
5345                 max_nest++;
5346
5347         return max_nest;
5348 }
5349 EXPORT_SYMBOL(dev_get_nest_level);
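/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * dev_get_nest_level() is intended for lockdep annotation of stacked
 * devices. A VLAN-like driver could compute its nesting depth with a type
 * predicate such as is_vlan_dev() and use the result as a lockdep
 * subclass:
 *
 *	int nest_level;
 *
 *	ASSERT_RTNL();
 *	nest_level = dev_get_nest_level(dev, is_vlan_dev);
 */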
5350
5351 static void dev_change_rx_flags(struct net_device *dev, int flags)
5352 {
5353         const struct net_device_ops *ops = dev->netdev_ops;
5354
5355         if (ops->ndo_change_rx_flags)
5356                 ops->ndo_change_rx_flags(dev, flags);
5357 }
5358
5359 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5360 {
5361         unsigned int old_flags = dev->flags;
5362         kuid_t uid;
5363         kgid_t gid;
5364
5365         ASSERT_RTNL();
5366
5367         dev->flags |= IFF_PROMISC;
5368         dev->promiscuity += inc;
5369         if (dev->promiscuity == 0) {
5370                 /*
5371                  * Avoid overflow.
5372                  * If inc causes overflow, untouch promisc and return error.
5373                  */
5374                 if (inc < 0)
5375                         dev->flags &= ~IFF_PROMISC;
5376                 else {
5377                         dev->promiscuity -= inc;
5378                         pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5379                                 dev->name);
5380                         return -EOVERFLOW;
5381                 }
5382         }
5383         if (dev->flags != old_flags) {
5384                 pr_info("device %s %s promiscuous mode\n",
5385                         dev->name,
5386                         dev->flags & IFF_PROMISC ? "entered" : "left");
5387                 if (audit_enabled) {
5388                         current_uid_gid(&uid, &gid);
5389                         audit_log(current->audit_context, GFP_ATOMIC,
5390                                 AUDIT_ANOM_PROMISCUOUS,
5391                                 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5392                                 dev->name, (dev->flags & IFF_PROMISC),
5393                                 (old_flags & IFF_PROMISC),
5394                                 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5395                                 from_kuid(&init_user_ns, uid),
5396                                 from_kgid(&init_user_ns, gid),
5397                                 audit_get_sessionid(current));
5398                 }
5399
5400                 dev_change_rx_flags(dev, IFF_PROMISC);
5401         }
5402         if (notify)
5403                 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5404         return 0;
5405 }
5406
5407 /**
5408  *      dev_set_promiscuity     - update promiscuity count on a device
5409  *      @dev: device
5410  *      @inc: modifier
5411  *
5412  *      Add or remove promiscuity from a device. While the count in the device
5413  *      remains above zero the interface remains promiscuous. Once it hits zero
5414  *      the device reverts back to normal filtering operation. A negative inc
5415  *      value is used to drop promiscuity on the device.
5416  *      Return 0 if successful or a negative errno code on error.
5417  */
5418 int dev_set_promiscuity(struct net_device *dev, int inc)
5419 {
5420         unsigned int old_flags = dev->flags;
5421         int err;
5422
5423         err = __dev_set_promiscuity(dev, inc, true);
5424         if (err < 0)
5425                 return err;
5426         if (dev->flags != old_flags)
5427                 dev_set_rx_mode(dev);
5428         return err;
5429 }
5430 EXPORT_SYMBOL(dev_set_promiscuity);
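/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * packet-capture style users bump promiscuity while capturing and drop it
 * symmetrically when done, always under RTNL.
 *
 *	rtnl_lock();
 *	err = dev_set_promiscuity(dev, 1);
 *	rtnl_unlock();
 *	if (err)
 *		return err;
 *	...
 *	rtnl_lock();
 *	dev_set_promiscuity(dev, -1);
 *	rtnl_unlock();
 */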
5431
5432 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5433 {
5434         unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5435
5436         ASSERT_RTNL();
5437
5438         dev->flags |= IFF_ALLMULTI;
5439         dev->allmulti += inc;
5440         if (dev->allmulti == 0) {
5441                 /*
5442                  * Avoid overflow.
5443                  * If inc causes overflow, untouch allmulti and return error.
5444                  */
5445                 if (inc < 0)
5446                         dev->flags &= ~IFF_ALLMULTI;
5447                 else {
5448                         dev->allmulti -= inc;
5449                         pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5450                                 dev->name);
5451                         return -EOVERFLOW;
5452                 }
5453         }
5454         if (dev->flags ^ old_flags) {
5455                 dev_change_rx_flags(dev, IFF_ALLMULTI);
5456                 dev_set_rx_mode(dev);
5457                 if (notify)
5458                         __dev_notify_flags(dev, old_flags,
5459                                            dev->gflags ^ old_gflags);
5460         }
5461         return 0;
5462 }
5463
5464 /**
5465  *      dev_set_allmulti        - update allmulti count on a device
5466  *      @dev: device
5467  *      @inc: modifier
5468  *
5469  *      Add or remove reception of all multicast frames to a device. While the
5470  *      count in the device remains above zero the interface remains listening
5471  *      to all multicast frames. Once it hits zero the device reverts back to normal
5472  *      filtering operation. A negative @inc value is used to drop the counter
5473  *      when releasing a resource needing all multicasts.
5474  *      Return 0 if successful or a negative errno code on error.
5475  */
5476
5477 int dev_set_allmulti(struct net_device *dev, int inc)
5478 {
5479         return __dev_set_allmulti(dev, inc, true);
5480 }
5481 EXPORT_SYMBOL(dev_set_allmulti);
5482
5483 /*
5484  *      Upload unicast and multicast address lists to device and
5485  *      configure RX filtering. When the device doesn't support unicast
5486  *      filtering it is put in promiscuous mode while unicast addresses
5487  *      are present.
5488  */
5489 void __dev_set_rx_mode(struct net_device *dev)
5490 {
5491         const struct net_device_ops *ops = dev->netdev_ops;
5492
5493         /* dev_open will call this function so the list will stay sane. */
5494         if (!(dev->flags&IFF_UP))
5495                 return;
5496
5497         if (!netif_device_present(dev))
5498                 return;
5499
5500         if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
5501                 /* Unicast address changes may only happen under the rtnl,
5502                  * therefore calling __dev_set_promiscuity here is safe.
5503                  */
5504                 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5505                         __dev_set_promiscuity(dev, 1, false);
5506                         dev->uc_promisc = true;
5507                 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5508                         __dev_set_promiscuity(dev, -1, false);
5509                         dev->uc_promisc = false;
5510                 }
5511         }
5512
5513         if (ops->ndo_set_rx_mode)
5514                 ops->ndo_set_rx_mode(dev);
5515 }
5516
5517 void dev_set_rx_mode(struct net_device *dev)
5518 {
5519         netif_addr_lock_bh(dev);
5520         __dev_set_rx_mode(dev);
5521         netif_addr_unlock_bh(dev);
5522 }
5523
5524 /**
5525  *      dev_get_flags - get flags reported to userspace
5526  *      @dev: device
5527  *
5528  *      Get the combination of flag bits exported through APIs to userspace.
5529  */
5530 unsigned int dev_get_flags(const struct net_device *dev)
5531 {
5532         unsigned int flags;
5533
5534         flags = (dev->flags & ~(IFF_PROMISC |
5535                                 IFF_ALLMULTI |
5536                                 IFF_RUNNING |
5537                                 IFF_LOWER_UP |
5538                                 IFF_DORMANT)) |
5539                 (dev->gflags & (IFF_PROMISC |
5540                                 IFF_ALLMULTI));
5541
5542         if (netif_running(dev)) {
5543                 if (netif_oper_up(dev))
5544                         flags |= IFF_RUNNING;
5545                 if (netif_carrier_ok(dev))
5546                         flags |= IFF_LOWER_UP;
5547                 if (netif_dormant(dev))
5548                         flags |= IFF_DORMANT;
5549         }
5550
5551         return flags;
5552 }
5553 EXPORT_SYMBOL(dev_get_flags);
5554
5555 int __dev_change_flags(struct net_device *dev, unsigned int flags)
5556 {
5557         unsigned int old_flags = dev->flags;
5558         int ret;
5559
5560         ASSERT_RTNL();
5561
5562         /*
5563          *      Set the flags on our device.
5564          */
5565
5566         dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5567                                IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5568                                IFF_AUTOMEDIA)) |
5569                      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5570                                     IFF_ALLMULTI));
5571
5572         /*
5573          *      Load in the correct multicast list now the flags have changed.
5574          */
5575
5576         if ((old_flags ^ flags) & IFF_MULTICAST)
5577                 dev_change_rx_flags(dev, IFF_MULTICAST);
5578
5579         dev_set_rx_mode(dev);
5580
5581         /*
5582          *      Have we downed the interface? We handle IFF_UP ourselves
5583          *      according to user attempts to set it, rather than blindly
5584          *      setting it.
5585          */
5586
5587         ret = 0;
5588         if ((old_flags ^ flags) & IFF_UP)
5589                 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
5590
5591         if ((flags ^ dev->gflags) & IFF_PROMISC) {
5592                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5593                 unsigned int old_flags = dev->flags;
5594
5595                 dev->gflags ^= IFF_PROMISC;
5596
5597                 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5598                         if (dev->flags != old_flags)
5599                                 dev_set_rx_mode(dev);
5600         }
5601
5602         /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5603            is important. Some (broken) drivers set IFF_PROMISC when
5604            IFF_ALLMULTI is requested, without asking us and without reporting it.
5605          */
5606         if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
5607                 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5608
5609                 dev->gflags ^= IFF_ALLMULTI;
5610                 __dev_set_allmulti(dev, inc, false);
5611         }
5612
5613         return ret;
5614 }
5615
5616 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5617                         unsigned int gchanges)
5618 {
5619         unsigned int changes = dev->flags ^ old_flags;
5620
5621         if (gchanges)
5622                 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
5623
5624         if (changes & IFF_UP) {
5625                 if (dev->flags & IFF_UP)
5626                         call_netdevice_notifiers(NETDEV_UP, dev);
5627                 else
5628                         call_netdevice_notifiers(NETDEV_DOWN, dev);
5629         }
5630
5631         if (dev->flags & IFF_UP &&
5632             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5633                 struct netdev_notifier_change_info change_info;
5634
5635                 change_info.flags_changed = changes;
5636                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5637                                               &change_info.info);
5638         }
5639 }
5640
5641 /**
5642  *      dev_change_flags - change device settings
5643  *      @dev: device
5644  *      @flags: device state flags
5645  *
5646  *      Change settings on device based state flags. The flags are
5647  *      in the userspace exported format.
5648  */
5649 int dev_change_flags(struct net_device *dev, unsigned int flags)
5650 {
5651         int ret;
5652         unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
5653
5654         ret = __dev_change_flags(dev, flags);
5655         if (ret < 0)
5656                 return ret;
5657
5658         changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
5659         __dev_notify_flags(dev, old_flags, changes);
5660         return ret;
5661 }
5662 EXPORT_SYMBOL(dev_change_flags);
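/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * bringing an interface up from kernel code by flipping IFF_UP in the
 * userspace-visible flags, much like the SIOCSIFFLAGS ioctl path. The
 * device name is hypothetical.
 *
 *	struct net_device *dev;
 *	int err = -ENODEV;
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(&init_net, "eth0");
 *	if (dev)
 *		err = dev_change_flags(dev, dev->flags | IFF_UP);
 *	rtnl_unlock();
 */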
5663
5664 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5665 {
5666         const struct net_device_ops *ops = dev->netdev_ops;
5667
5668         if (ops->ndo_change_mtu)
5669                 return ops->ndo_change_mtu(dev, new_mtu);
5670
5671         dev->mtu = new_mtu;
5672         return 0;
5673 }
5674
5675 /**
5676  *      dev_set_mtu - Change maximum transfer unit
5677  *      @dev: device
5678  *      @new_mtu: new transfer unit
5679  *
5680  *      Change the maximum transfer size of the network device.
5681  */
5682 int dev_set_mtu(struct net_device *dev, int new_mtu)
5683 {
5684         int err, orig_mtu;
5685
5686         if (new_mtu == dev->mtu)
5687                 return 0;
5688
5689         /*      MTU must be positive.    */
5690         if (new_mtu < 0)
5691                 return -EINVAL;
5692
5693         if (!netif_device_present(dev))
5694                 return -ENODEV;
5695
5696         err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5697         err = notifier_to_errno(err);
5698         if (err)
5699                 return err;
5700
5701         orig_mtu = dev->mtu;
5702         err = __dev_set_mtu(dev, new_mtu);
5703
5704         if (!err) {
5705                 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5706                 err = notifier_to_errno(err);
5707                 if (err) {
5708                         /* setting mtu back and notifying everyone again,
5709                          * so that they have a chance to revert changes.
5710                          */
5711                         __dev_set_mtu(dev, orig_mtu);
5712                         call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5713                 }
5714         }
5715         return err;
5716 }
5717 EXPORT_SYMBOL(dev_set_mtu);
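/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * changing the MTU from kernel code. dev_set_mtu() must be called under
 * RTNL; a veto from a notifier comes back as a negative errno.
 *
 *	int err;
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 *	if (err)
 *		pr_warn("failed to set MTU on %s: %d\n", dev->name, err);
 */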
5718
5719 /**
5720  *      dev_set_group - Change group this device belongs to
5721  *      @dev: device
5722  *      @new_group: group this device should belong to
5723  */
5724 void dev_set_group(struct net_device *dev, int new_group)
5725 {
5726         dev->group = new_group;
5727 }
5728 EXPORT_SYMBOL(dev_set_group);
5729
5730 /**
5731  *      dev_set_mac_address - Change Media Access Control Address
5732  *      @dev: device
5733  *      @sa: new address
5734  *
5735  *      Change the hardware (MAC) address of the device
5736  */
5737 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5738 {
5739         const struct net_device_ops *ops = dev->netdev_ops;
5740         int err;
5741
5742         if (!ops->ndo_set_mac_address)
5743                 return -EOPNOTSUPP;
5744         if (sa->sa_family != dev->type)
5745                 return -EINVAL;
5746         if (!netif_device_present(dev))
5747                 return -ENODEV;
5748         err = ops->ndo_set_mac_address(dev, sa);
5749         if (err)
5750                 return err;
5751         dev->addr_assign_type = NET_ADDR_SET;
5752         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5753         add_device_randomness(dev->dev_addr, dev->addr_len);
5754         return 0;
5755 }
5756 EXPORT_SYMBOL(dev_set_mac_address);
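/*
 * Illustrative usage sketch (an assumption, not taken from this file):
 * setting a MAC address from kernel code. The sockaddr family must match
 * dev->type and the address occupies the first dev->addr_len bytes of
 * sa_data; RTNL must be held around the call.
 *
 *	struct sockaddr sa;
 *	static const u8 new_mac[ETH_ALEN] = {
 *		0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *	int err;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */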
5757
5758 /**
5759  *      dev_change_carrier - Change device carrier
5760  *      @dev: device
5761  *      @new_carrier: new value
5762  *
5763  *      Change device carrier
5764  */
5765 int dev_change_carrier(struct net_device *dev, bool new_carrier)
5766 {
5767         const struct net_device_ops *ops = dev->netdev_ops;
5768
5769         if (!ops->ndo_change_carrier)
5770                 return -EOPNOTSUPP;
5771         if (!netif_device_present(dev))
5772                 return -ENODEV;
5773         return ops->ndo_change_carrier(dev, new_carrier);
5774 }
5775 EXPORT_SYMBOL(dev_change_carrier);
5776
5777 /**
5778  *      dev_get_phys_port_id - Get device physical port ID
5779  *      @dev: device
5780  *      @ppid: port ID
5781  *
5782  *      Get device physical port ID
5783  */
5784 int dev_get_phys_port_id(struct net_device *dev,
5785                          struct netdev_phys_port_id *ppid)
5786 {
5787         const struct net_device_ops *ops = dev->netdev_ops;
5788
5789         if (!ops->ndo_get_phys_port_id)
5790                 return -EOPNOTSUPP;
5791         return ops->ndo_get_phys_port_id(dev, ppid);
5792 }
5793 EXPORT_SYMBOL(dev_get_phys_port_id);
5794
5795 /**
5796  *      dev_new_index   -       allocate an ifindex
5797  *      @net: the applicable net namespace
5798  *
5799  *      Returns a suitable unique value for a new device interface
5800  *      number.  The caller must hold the rtnl semaphore or the
5801  *      dev_base_lock to be sure it remains unique.
5802  */
5803 static int dev_new_index(struct net *net)
5804 {
5805         int ifindex = net->ifindex;
5806         for (;;) {
5807                 if (++ifindex <= 0)
5808                         ifindex = 1;
5809                 if (!__dev_get_by_index(net, ifindex))
5810                         return net->ifindex = ifindex;
5811         }
5812 }
5813
5814 /* Delayed registration/unregistration */
5815 static LIST_HEAD(net_todo_list);
5816 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5817
5818 static void net_set_todo(struct net_device *dev)
5819 {
5820         list_add_tail(&dev->todo_list, &net_todo_list);
5821         dev_net(dev)->dev_unreg_count++;
5822 }
5823
5824 static void rollback_registered_many(struct list_head *head)
5825 {
5826         struct net_device *dev, *tmp;
5827         LIST_HEAD(close_head);
5828
5829         BUG_ON(dev_boot_phase);
5830         ASSERT_RTNL();
5831
5832         list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5833                 /* Some devices get unregistered without ever having been
5834                  * registered, for initialization unwind. Remove those
5835                  * devices and proceed with the remaining ones.
5836                  */
5837                 if (dev->reg_state == NETREG_UNINITIALIZED) {
5838                         pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5839                                  dev->name, dev);
5840
5841                         WARN_ON(1);
5842                         list_del(&dev->unreg_list);
5843                         continue;
5844                 }
5845                 dev->dismantle = true;
5846                 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5847         }
5848
5849         /* If device is running, close it first. */
5850         list_for_each_entry(dev, head, unreg_list)
5851                 list_add_tail(&dev->close_list, &close_head);
5852         dev_close_many(&close_head);
5853
5854         list_for_each_entry(dev, head, unreg_list) {
5855                 /* And unlink it from device chain. */
5856                 unlist_netdevice(dev);
5857
5858                 dev->reg_state = NETREG_UNREGISTERING;
5859         }
5860
5861         synchronize_net();
5862
5863         list_for_each_entry(dev, head, unreg_list) {
5864                 /* Shutdown queueing discipline. */
5865                 dev_shutdown(dev);
5866
5867
5868                 /* Notify protocols that we are about to destroy
5869                    this device. They should clean up all of their state.
5870                 */
5871                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5872
5873                 /*
5874                  *      Flush the unicast and multicast chains
5875                  */
5876                 dev_uc_flush(dev);
5877                 dev_mc_flush(dev);
5878
5879                 if (dev->netdev_ops->ndo_uninit)
5880                         dev->netdev_ops->ndo_uninit(dev);
5881
5882                 if (!dev->rtnl_link_ops ||
5883                     dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5884                         rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
5885
5886                 /* The notifier chain MUST have detached all upper devices from us. */
5887                 WARN_ON(netdev_has_any_upper_dev(dev));
5888
5889                 /* Remove entries from kobject tree */
5890                 netdev_unregister_kobject(dev);
5891 #ifdef CONFIG_XPS
5892                 /* Remove XPS queueing entries */
5893                 netif_reset_xps_queues_gt(dev, 0);
5894 #endif
5895         }
5896
5897         synchronize_net();
5898
5899         list_for_each_entry(dev, head, unreg_list)
5900                 dev_put(dev);
5901 }
5902
5903 static void rollback_registered(struct net_device *dev)
5904 {
5905         LIST_HEAD(single);
5906
5907         list_add(&dev->unreg_list, &single);
5908         rollback_registered_many(&single);
5909         list_del(&single);
5910 }
5911
5912 static netdev_features_t netdev_fix_features(struct net_device *dev,
5913         netdev_features_t features)
5914 {
5915         /* Fix illegal checksum combinations */
5916         if ((features & NETIF_F_HW_CSUM) &&
5917             (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5918                 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5919                 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5920         }
5921
5922         /* TSO requires that SG is present as well. */
5923         if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5924                 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5925                 features &= ~NETIF_F_ALL_TSO;
5926         }
5927
5928         if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5929                                         !(features & NETIF_F_IP_CSUM)) {
5930                 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5931                 features &= ~NETIF_F_TSO;
5932                 features &= ~NETIF_F_TSO_ECN;
5933         }
5934
5935         if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5936                                          !(features & NETIF_F_IPV6_CSUM)) {
5937                 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5938                 features &= ~NETIF_F_TSO6;
5939         }
5940
5941         /* TSO ECN requires that TSO is present as well. */
5942         if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5943                 features &= ~NETIF_F_TSO_ECN;
5944
5945         /* Software GSO depends on SG. */
5946         if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5947                 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5948                 features &= ~NETIF_F_GSO;
5949         }
5950
5951         /* UFO needs SG and checksumming */
5952         if (features & NETIF_F_UFO) {
5953                 /* maybe split UFO into V4 and V6? */
5954                 if (!((features & NETIF_F_GEN_CSUM) ||
5955                     (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5956                             == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5957                         netdev_dbg(dev,
5958                                 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5959                         features &= ~NETIF_F_UFO;
5960                 }
5961
5962                 if (!(features & NETIF_F_SG)) {
5963                         netdev_dbg(dev,
5964                                 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5965                         features &= ~NETIF_F_UFO;
5966                 }
5967         }
5968
5969 #ifdef CONFIG_NET_RX_BUSY_POLL
5970         if (dev->netdev_ops->ndo_busy_poll)
5971                 features |= NETIF_F_BUSY_POLL;
5972         else
5973 #endif
5974                 features &= ~NETIF_F_BUSY_POLL;
5975
5976         return features;
5977 }
5978
5979 int __netdev_update_features(struct net_device *dev)
5980 {
5981         netdev_features_t features;
5982         int err = 0;
5983
5984         ASSERT_RTNL();
5985
5986         features = netdev_get_wanted_features(dev);
5987
5988         if (dev->netdev_ops->ndo_fix_features)
5989                 features = dev->netdev_ops->ndo_fix_features(dev, features);
5990
5991         /* driver might be less strict about feature dependencies */
5992         features = netdev_fix_features(dev, features);
5993
5994         if (dev->features == features)
5995                 return 0;
5996
5997         netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5998                 &dev->features, &features);
5999
6000         if (dev->netdev_ops->ndo_set_features)
6001                 err = dev->netdev_ops->ndo_set_features(dev, features);
6002
6003         if (unlikely(err < 0)) {
6004                 netdev_err(dev,
6005                         "set_features() failed (%d); wanted %pNF, left %pNF\n",
6006                         err, &features, &dev->features);
6007                 return -1;
6008         }
6009
6010         if (!err)
6011                 dev->features = features;
6012
6013         return 1;
6014 }
6015
6016 /**
6017  *      netdev_update_features - recalculate device features
6018  *      @dev: the device to check
6019  *
6020  *      Recalculate the dev->features set and send notifications if it
6021  *      has changed. Should be called after driver- or hardware-dependent
6022  *      conditions that influence the features might have changed.
6023  */
6024 void netdev_update_features(struct net_device *dev)
6025 {
6026         if (__netdev_update_features(dev))
6027                 netdev_features_change(dev);
6028 }
6029 EXPORT_SYMBOL(netdev_update_features);
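/*
 * Illustrative usage sketch (an assumption, not taken from this file): a
 * driver whose ndo_fix_features() outcome depends on some runtime
 * condition calls netdev_update_features() after that condition changes;
 * the core then re-runs the fixups above and notifies user space only if
 * dev->features actually changed.
 *
 *	rtnl_lock();
 *	netdev_update_features(dev);
 *	rtnl_unlock();
 */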
6030
6031 /**
6032  *      netdev_change_features - recalculate device features
6033  *      @dev: the device to check
6034  *
6035  *      Recalculate the dev->features set and send notifications even
6036  *      if they have not changed. Should be called instead of
6037  *      netdev_update_features() if dev->vlan_features might also have
6038  *      changed, to allow the changes to be propagated to stacked
6039  *      VLAN devices.
6040  */
6041 void netdev_change_features(struct net_device *dev)
6042 {
6043         __netdev_update_features(dev);
6044         netdev_features_change(dev);
6045 }
6046 EXPORT_SYMBOL(netdev_change_features);
6047
6048 /**
6049  *      netif_stacked_transfer_operstate -      transfer operstate
6050  *      @rootdev: the root or lower level device to transfer state from
6051  *      @dev: the device to transfer operstate to
6052  *
6053  *      Transfer operational state from root to device. This is normally
6054  *      called when a stacking relationship exists between the root
6055  *      device and the device (a leaf device).
6056  */
6057 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6058                                         struct net_device *dev)
6059 {
6060         if (rootdev->operstate == IF_OPER_DORMANT)
6061                 netif_dormant_on(dev);
6062         else
6063                 netif_dormant_off(dev);
6064
6065         if (netif_carrier_ok(rootdev)) {
6066                 if (!netif_carrier_ok(dev))
6067                         netif_carrier_on(dev);
6068         } else {
6069                 if (netif_carrier_ok(dev))
6070                         netif_carrier_off(dev);
6071         }
6072 }
6073 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
6074
6075 #ifdef CONFIG_SYSFS
6076 static int netif_alloc_rx_queues(struct net_device *dev)
6077 {
6078         unsigned int i, count = dev->num_rx_queues;
6079         struct netdev_rx_queue *rx;
6080
6081         BUG_ON(count < 1);
6082
6083         rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
6084         if (!rx)
6085                 return -ENOMEM;
6086
6087         dev->_rx = rx;
6088
6089         for (i = 0; i < count; i++)
6090                 rx[i].dev = dev;
6091         return 0;
6092 }
6093 #endif
6094
6095 static void netdev_init_one_queue(struct net_device *dev,
6096                                   struct netdev_queue *queue, void *_unused)
6097 {
6098         /* Initialize queue lock */
6099         spin_lock_init(&queue->_xmit_lock);
6100         netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6101         queue->xmit_lock_owner = -1;
6102         netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6103         queue->dev = dev;
6104 #ifdef CONFIG_BQL
6105         dql_init(&queue->dql, HZ);
6106 #endif
6107 }
6108
6109 static void netif_free_tx_queues(struct net_device *dev)
6110 {
6111         kvfree(dev->_tx);
6112 }
6113
6114 static int netif_alloc_netdev_queues(struct net_device *dev)
6115 {
6116         unsigned int count = dev->num_tx_queues;
6117         struct netdev_queue *tx;
6118         size_t sz = count * sizeof(*tx);
6119
6120         BUG_ON(count < 1 || count > 0xffff);
6121
6122         tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6123         if (!tx) {
6124                 tx = vzalloc(sz);
6125                 if (!tx)
6126                         return -ENOMEM;
6127         }
6128         dev->_tx = tx;
6129
6130         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6131         spin_lock_init(&dev->tx_global_lock);
6132
6133         return 0;
6134 }
6135
6136 /**
6137  *      register_netdevice      - register a network device
6138  *      @dev: device to register
6139  *
6140  *      Take a completed network device structure and add it to the kernel
6141  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6142  *      chain. 0 is returned on success. A negative errno code is returned
6143  *      on a failure to set up the device, or if the name is a duplicate.
6144  *
6145  *      Callers must hold the rtnl semaphore. You may want
6146  *      register_netdev() instead of this.
6147  *
6148  *      BUGS:
6149  *      The locking appears insufficient to guarantee two parallel registers
6150  *      will not get the same name.
6151  */
6152
6153 int register_netdevice(struct net_device *dev)
6154 {
6155         int ret;
6156         struct net *net = dev_net(dev);
6157
6158         BUG_ON(dev_boot_phase);
6159         ASSERT_RTNL();
6160
6161         might_sleep();
6162
6163         /* When net_devices are persistent, this will be fatal. */
6164         BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6165         BUG_ON(!net);
6166
6167         spin_lock_init(&dev->addr_list_lock);
6168         netdev_set_addr_lockdep_class(dev);
6169
6170         dev->iflink = -1;
6171
6172         ret = dev_get_valid_name(net, dev, dev->name);
6173         if (ret < 0)
6174                 goto out;
6175
6176         /* Init, if this function is available */
6177         if (dev->netdev_ops->ndo_init) {
6178                 ret = dev->netdev_ops->ndo_init(dev);
6179                 if (ret) {
6180                         if (ret > 0)
6181                                 ret = -EIO;
6182                         goto out;
6183                 }
6184         }
6185
6186         if (((dev->hw_features | dev->features) &
6187              NETIF_F_HW_VLAN_CTAG_FILTER) &&
6188             (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6189              !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6190                 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6191                 ret = -EINVAL;
6192                 goto err_uninit;
6193         }
6194
6195         ret = -EBUSY;
6196         if (!dev->ifindex)
6197                 dev->ifindex = dev_new_index(net);
6198         else if (__dev_get_by_index(net, dev->ifindex))
6199                 goto err_uninit;
6200
6201         if (dev->iflink == -1)
6202                 dev->iflink = dev->ifindex;
6203
6204         /* Transfer changeable features to wanted_features and enable
6205          * software offloads (GSO and GRO).
6206          */
6207         dev->hw_features |= NETIF_F_SOFT_FEATURES;
6208         dev->features |= NETIF_F_SOFT_FEATURES;
6209         dev->wanted_features = dev->features & dev->hw_features;
6210
6211         if (!(dev->flags & IFF_LOOPBACK)) {
6212                 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6213         }
6214
6215         /* Make NETIF_F_HIGHDMA inheritable by VLAN devices.
6216          */
6217         dev->vlan_features |= NETIF_F_HIGHDMA;
6218
6219         /* Make NETIF_F_SG inheritable by tunnel devices.
6220          */
6221         dev->hw_enc_features |= NETIF_F_SG;
6222
6223         /* Make NETIF_F_SG inheritable by MPLS.
6224          */
6225         dev->mpls_features |= NETIF_F_SG;
6226
6227         ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6228         ret = notifier_to_errno(ret);
6229         if (ret)
6230                 goto err_uninit;
6231
6232         ret = netdev_register_kobject(dev);
6233         if (ret)
6234                 goto err_uninit;
6235         dev->reg_state = NETREG_REGISTERED;
6236
6237         __netdev_update_features(dev);
6238
6239         /*
6240          *      Default initial state at registration is that the
6241          *      device is present.
6242          */
6243
6244         set_bit(__LINK_STATE_PRESENT, &dev->state);
6245
6246         linkwatch_init_dev(dev);
6247
6248         dev_init_scheduler(dev);
6249         dev_hold(dev);
6250         list_netdevice(dev);
6251         add_device_randomness(dev->dev_addr, dev->addr_len);
6252
6253         /* If the device has a permanent device address, the driver should
6254          * set dev_addr and leave addr_assign_type set to NET_ADDR_PERM
6255          * (the default value).
6256          */
6257         if (dev->addr_assign_type == NET_ADDR_PERM)
6258                 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6259
6260         /* Notify protocols that a new device appeared. */
6261         ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
6262         ret = notifier_to_errno(ret);
6263         if (ret) {
6264                 rollback_registered(dev);
6265                 dev->reg_state = NETREG_UNREGISTERED;
6266         }
6267         /*
6268          *      Prevent userspace races by waiting until the network
6269          *      device is fully set up before sending notifications.
6270          */
6271         if (!dev->rtnl_link_ops ||
6272             dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6273                 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6274
6275 out:
6276         return ret;
6277
6278 err_uninit:
6279         if (dev->netdev_ops->ndo_uninit)
6280                 dev->netdev_ops->ndo_uninit(dev);
6281         goto out;
6282 }
6283 EXPORT_SYMBOL(register_netdevice);
6284
6285 /**
6286  *      init_dummy_netdev       - init a dummy network device for NAPI
6287  *      @dev: device to init
6288  *
6289  *      This takes a network device structure and initializes the minimum
6290  *      number of fields so it can be used to schedule NAPI polls without
6291  *      registering a full blown interface. This is to be used by drivers
6292  *      that need to tie several hardware interfaces to a single NAPI
6293  *      poll scheduler due to HW limitations.
6294  */
6295 int init_dummy_netdev(struct net_device *dev)
6296 {
6297         /* Clear everything. Note we don't initialize spinlocks
6298          * as they aren't supposed to be taken by any of the
6299          * NAPI code and this dummy netdev is supposed to be
6300          * used only for NAPI polls.
6301          */
6302         memset(dev, 0, sizeof(struct net_device));
6303
6304         /* make sure we BUG if anyone tries to hit the standard
6305          * register/unregister code path
6306          */
6307         dev->reg_state = NETREG_DUMMY;
6308
6309         /* NAPI wants this */
6310         INIT_LIST_HEAD(&dev->napi_list);
6311
6312         /* a dummy interface is started by default */
6313         set_bit(__LINK_STATE_PRESENT, &dev->state);
6314         set_bit(__LINK_STATE_START, &dev->state);
6315
6316         /* Note: We don't allocate pcpu_refcnt for dummy devices,
6317          * because users of this 'device' don't need to change
6318          * its refcount.
6319          */
6320
6321         return 0;
6322 }
6323 EXPORT_SYMBOL_GPL(init_dummy_netdev);
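
/*
 * Example: a driver with several ports behind one interrupt can hang its
 * NAPI context off a dummy netdev.  An illustrative sketch; struct
 * foo_adapter and foo_poll() are hypothetical:
 *
 *	struct foo_adapter {
 *		struct net_device napi_dev;
 *		struct napi_struct napi;
 *	};
 *
 *	static void foo_setup_napi(struct foo_adapter *adap)
 *	{
 *		init_dummy_netdev(&adap->napi_dev);
 *		netif_napi_add(&adap->napi_dev, &adap->napi, foo_poll,
 *			       NAPI_POLL_WEIGHT);
 *		napi_enable(&adap->napi);
 *	}
 */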
6324
6325
6326 /**
6327  *      register_netdev - register a network device
6328  *      @dev: device to register
6329  *
6330  *      Take a completed network device structure and add it to the kernel
6331  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6332  *      chain. 0 is returned on success. A negative errno code is returned
6333  *      on a failure to set up the device, or if the name is a duplicate.
6334  *
6335  *      This is a wrapper around register_netdevice that takes the rtnl semaphore
6336  *      and expands the device name if you passed a format string to
6337  *      alloc_netdev.
6338  */
6339 int register_netdev(struct net_device *dev)
6340 {
6341         int err;
6342
6343         rtnl_lock();
6344         err = register_netdevice(dev);
6345         rtnl_unlock();
6346         return err;
6347 }
6348 EXPORT_SYMBOL(register_netdev);
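
/*
 * Example: the usual probe-time pattern, sketched with a hypothetical
 * foo_netdev_ops and struct foo_priv; alloc_etherdev() comes from
 * <linux/etherdevice.h>:
 *
 *	static int foo_probe(struct device *parent)
 *	{
 *		struct net_device *dev;
 *		int err;
 *
 *		dev = alloc_etherdev(sizeof(struct foo_priv));
 *		if (!dev)
 *			return -ENOMEM;
 *
 *		dev->netdev_ops = &foo_netdev_ops;
 *		SET_NETDEV_DEV(dev, parent);
 *
 *		err = register_netdev(dev);
 *		if (err)
 *			free_netdev(dev);
 *		return err;
 *	}
 */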
6349
6350 int netdev_refcnt_read(const struct net_device *dev)
6351 {
6352         int i, refcnt = 0;
6353
6354         for_each_possible_cpu(i)
6355                 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6356         return refcnt;
6357 }
6358 EXPORT_SYMBOL(netdev_refcnt_read);
6359
6360 /**
6361  * netdev_wait_allrefs - wait until all references are gone.
6362  * @dev: target net_device
6363  *
6364  * This is called when unregistering network devices.
6365  *
6366  * Any protocol or device that holds a reference should register
6367  * for netdevice notification, and clean up and put back the
6368  * reference if they receive an UNREGISTER event.
6369  * We can get stuck here if buggy protocols don't correctly
6370  * call dev_put.
6371  */
6372 static void netdev_wait_allrefs(struct net_device *dev)
6373 {
6374         unsigned long rebroadcast_time, warning_time;
6375         int refcnt;
6376
6377         linkwatch_forget_dev(dev);
6378
6379         rebroadcast_time = warning_time = jiffies;
6380         refcnt = netdev_refcnt_read(dev);
6381
6382         while (refcnt != 0) {
6383                 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6384                         rtnl_lock();
6385
6386                         /* Rebroadcast unregister notification */
6387                         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6388
6389                         __rtnl_unlock();
6390                         rcu_barrier();
6391                         rtnl_lock();
6392
6393                         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6394                         if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6395                                      &dev->state)) {
6396                                 /* We must not have linkwatch events
6397                                  * pending on unregister. If this
6398                                  * happens, we simply run the queue
6399                                  * unscheduled, resulting in a noop
6400                                  * for this device.
6401                                  */
6402                                 linkwatch_run_queue();
6403                         }
6404
6405                         __rtnl_unlock();
6406
6407                         rebroadcast_time = jiffies;
6408                 }
6409
6410                 msleep(250);
6411
6412                 refcnt = netdev_refcnt_read(dev);
6413
6414                 if (time_after(jiffies, warning_time + 10 * HZ)) {
6415                         pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6416                                  dev->name, refcnt);
6417                         warning_time = jiffies;
6418                 }
6419         }
6420 }
6421
6422 /* The sequence is:
6423  *
6424  *      rtnl_lock();
6425  *      ...
6426  *      register_netdevice(x1);
6427  *      register_netdevice(x2);
6428  *      ...
6429  *      unregister_netdevice(y1);
6430  *      unregister_netdevice(y2);
6431  *      ...
6432  *      rtnl_unlock();
6433  *      free_netdev(y1);
6434  *      free_netdev(y2);
6435  *
6436  * We are invoked by rtnl_unlock().
6437  * This allows us to deal with problems:
6438  * 1) We can delete sysfs objects which invoke hotplug
6439  *    without deadlocking with linkwatch via keventd.
6440  * 2) Since we run with the RTNL semaphore not held, we can sleep
6441  *    safely in order to wait for the netdev refcnt to drop to zero.
6442  *
6443  * We must not return until all unregister events added during
6444  * the interval the lock was held have been completed.
6445  */
6446 void netdev_run_todo(void)
6447 {
6448         struct list_head list;
6449
6450         /* Snapshot list, allow later requests */
6451         list_replace_init(&net_todo_list, &list);
6452
6453         __rtnl_unlock();
6454
6455
6456         /* Wait for rcu callbacks to finish before next phase */
6457         if (!list_empty(&list))
6458                 rcu_barrier();
6459
6460         while (!list_empty(&list)) {
6461                 struct net_device *dev
6462                         = list_first_entry(&list, struct net_device, todo_list);
6463                 list_del(&dev->todo_list);
6464
6465                 rtnl_lock();
6466                 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6467                 __rtnl_unlock();
6468
6469                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
6470                         pr_err("network todo '%s' but state %d\n",
6471                                dev->name, dev->reg_state);
6472                         dump_stack();
6473                         continue;
6474                 }
6475
6476                 dev->reg_state = NETREG_UNREGISTERED;
6477
6478                 on_each_cpu(flush_backlog, dev, 1);
6479
6480                 netdev_wait_allrefs(dev);
6481
6482                 /* paranoia */
6483                 BUG_ON(netdev_refcnt_read(dev));
6484                 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6485                 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6486                 WARN_ON(dev->dn_ptr);
6487
6488                 if (dev->destructor)
6489                         dev->destructor(dev);
6490
6491                 /* Report a network device has been unregistered */
6492                 rtnl_lock();
6493                 dev_net(dev)->dev_unreg_count--;
6494                 __rtnl_unlock();
6495                 wake_up(&netdev_unregistering_wq);
6496
6497                 /* Free network device */
6498                 kobject_put(&dev->dev.kobj);
6499         }
6500 }
6501
6502 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
6503  * fields in the same order, with only the type differing.
6504  */
6505 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6506                              const struct net_device_stats *netdev_stats)
6507 {
6508 #if BITS_PER_LONG == 64
6509         BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6510         memcpy(stats64, netdev_stats, sizeof(*stats64));
6511 #else
6512         size_t i, n = sizeof(*stats64) / sizeof(u64);
6513         const unsigned long *src = (const unsigned long *)netdev_stats;
6514         u64 *dst = (u64 *)stats64;
6515
6516         BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6517                      sizeof(*stats64) / sizeof(u64));
6518         for (i = 0; i < n; i++)
6519                 dst[i] = src[i];
6520 #endif
6521 }
6522 EXPORT_SYMBOL(netdev_stats_to_stats64);
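
/*
 * Example: a driver that keeps most counters in the legacy dev->stats block
 * but maintains a few 64-bit values of its own can seed its ndo_get_stats64()
 * with this helper.  A sketch; the foo_priv fields are hypothetical:
 *
 *	static struct rtnl_link_stats64 *
 *	foo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *s)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		netdev_stats_to_stats64(s, &dev->stats);
 *		s->rx_bytes = atomic64_read(&priv->rx_bytes);
 *		s->tx_bytes = atomic64_read(&priv->tx_bytes);
 *		return s;
 *	}
 */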
6523
6524 /**
6525  *      dev_get_stats   - get network device statistics
6526  *      @dev: device to get statistics from
6527  *      @storage: place to store stats
6528  *
6529  *      Get network statistics from device. Return @storage.
6530  *      The device driver may provide its own method by setting
6531  *      dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6532  *      otherwise the internal statistics structure is used.
6533  */
6534 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6535                                         struct rtnl_link_stats64 *storage)
6536 {
6537         const struct net_device_ops *ops = dev->netdev_ops;
6538
6539         if (ops->ndo_get_stats64) {
6540                 memset(storage, 0, sizeof(*storage));
6541                 ops->ndo_get_stats64(dev, storage);
6542         } else if (ops->ndo_get_stats) {
6543                 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6544         } else {
6545                 netdev_stats_to_stats64(storage, &dev->stats);
6546         }
6547         storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6548         storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
6549         return storage;
6550 }
6551 EXPORT_SYMBOL(dev_get_stats);
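
/*
 * Example: callers provide their own storage, so reading statistics needs no
 * shared stats lock.  A small hypothetical sketch that logs the packet
 * counters of a device:
 *
 *	static void foo_log_counters(struct net_device *dev)
 *	{
 *		struct rtnl_link_stats64 stats;
 *
 *		dev_get_stats(dev, &stats);
 *		netdev_info(dev, "rx %llu tx %llu packets\n",
 *			    stats.rx_packets, stats.tx_packets);
 *	}
 */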
6552
6553 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6554 {
6555         struct netdev_queue *queue = dev_ingress_queue(dev);
6556
6557 #ifdef CONFIG_NET_CLS_ACT
6558         if (queue)
6559                 return queue;
6560         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6561         if (!queue)
6562                 return NULL;
6563         netdev_init_one_queue(dev, queue, NULL);
6564         queue->qdisc = &noop_qdisc;
6565         queue->qdisc_sleeping = &noop_qdisc;
6566         rcu_assign_pointer(dev->ingress_queue, queue);
6567 #endif
6568         return queue;
6569 }
6570
6571 static const struct ethtool_ops default_ethtool_ops;
6572
6573 void netdev_set_default_ethtool_ops(struct net_device *dev,
6574                                     const struct ethtool_ops *ops)
6575 {
6576         if (dev->ethtool_ops == &default_ethtool_ops)
6577                 dev->ethtool_ops = ops;
6578 }
6579 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
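
/*
 * Example: a mid-layer that creates net_devices on behalf of hardware drivers
 * can install fallback ethtool operations without overriding a driver that
 * already set its own.  foo_default_ethtool_ops and foo_prepare() are
 * hypothetical:
 *
 *	static const struct ethtool_ops foo_default_ethtool_ops = {
 *		.get_link = ethtool_op_get_link,
 *	};
 *
 *	static void foo_prepare(struct net_device *dev)
 *	{
 *		netdev_set_default_ethtool_ops(dev, &foo_default_ethtool_ops);
 *	}
 */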
6580
6581 void netdev_freemem(struct net_device *dev)
6582 {
6583         char *addr = (char *)dev - dev->padded;
6584
6585         kvfree(addr);
6586 }
6587
6588 /**
6589  *      alloc_netdev_mqs - allocate network device
6590  *      @sizeof_priv:           size of private data to allocate space for
6591  *      @name:                  device name format string
6592  *      @name_assign_type:      origin of device name
6593  *      @setup:                 callback to initialize device
6594  *      @txqs:                  the number of TX subqueues to allocate
6595  *      @rxqs:                  the number of RX subqueues to allocate
6596  *
6597  *      Allocates a struct net_device with private data area for driver use
6598  *      and performs basic initialization.  Also allocates subqueue structs
6599  *      for each queue on the device.
6600  */
6601 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6602                 unsigned char name_assign_type,
6603                 void (*setup)(struct net_device *),
6604                 unsigned int txqs, unsigned int rxqs)
6605 {
6606         struct net_device *dev;
6607         size_t alloc_size;
6608         struct net_device *p;
6609
6610         BUG_ON(strlen(name) >= sizeof(dev->name));
6611
6612         if (txqs < 1) {
6613                 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6614                 return NULL;
6615         }
6616
6617 #ifdef CONFIG_SYSFS
6618         if (rxqs < 1) {
6619                 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6620                 return NULL;
6621         }
6622 #endif
6623
6624         alloc_size = sizeof(struct net_device);
6625         if (sizeof_priv) {
6626                 /* ensure 32-byte alignment of private area */
6627                 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6628                 alloc_size += sizeof_priv;
6629         }
6630         /* ensure 32-byte alignment of whole construct */
6631         alloc_size += NETDEV_ALIGN - 1;
6632
6633         p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6634         if (!p)
6635                 p = vzalloc(alloc_size);
6636         if (!p)
6637                 return NULL;
6638
6639         dev = PTR_ALIGN(p, NETDEV_ALIGN);
6640         dev->padded = (char *)dev - (char *)p;
6641
6642         dev->pcpu_refcnt = alloc_percpu(int);
6643         if (!dev->pcpu_refcnt)
6644                 goto free_dev;
6645
6646         if (dev_addr_init(dev))
6647                 goto free_pcpu;
6648
6649         dev_mc_init(dev);
6650         dev_uc_init(dev);
6651
6652         dev_net_set(dev, &init_net);
6653
6654         dev->gso_max_size = GSO_MAX_SIZE;
6655         dev->gso_max_segs = GSO_MAX_SEGS;
6656         dev->gso_min_segs = 0;
6657
6658         INIT_LIST_HEAD(&dev->napi_list);
6659         INIT_LIST_HEAD(&dev->unreg_list);
6660         INIT_LIST_HEAD(&dev->close_list);
6661         INIT_LIST_HEAD(&dev->link_watch_list);
6662         INIT_LIST_HEAD(&dev->adj_list.upper);
6663         INIT_LIST_HEAD(&dev->adj_list.lower);
6664         INIT_LIST_HEAD(&dev->all_adj_list.upper);
6665         INIT_LIST_HEAD(&dev->all_adj_list.lower);
6666         dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
6667         setup(dev);
6668
6669         dev->num_tx_queues = txqs;
6670         dev->real_num_tx_queues = txqs;
6671         if (netif_alloc_netdev_queues(dev))
6672                 goto free_all;
6673
6674 #ifdef CONFIG_SYSFS
6675         dev->num_rx_queues = rxqs;
6676         dev->real_num_rx_queues = rxqs;
6677         if (netif_alloc_rx_queues(dev))
6678                 goto free_all;
6679 #endif
6680
6681         strcpy(dev->name, name);
6682         dev->name_assign_type = name_assign_type;
6683         dev->group = INIT_NETDEV_GROUP;
6684         if (!dev->ethtool_ops)
6685                 dev->ethtool_ops = &default_ethtool_ops;
6686         return dev;
6687
6688 free_all:
6689         free_netdev(dev);
6690         return NULL;
6691
6692 free_pcpu:
6693         free_percpu(dev->pcpu_refcnt);
6694 free_dev:
6695         netdev_freemem(dev);
6696         return NULL;
6697 }
6698 EXPORT_SYMBOL(alloc_netdev_mqs);
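
/*
 * Example: most Ethernet drivers reach this through the alloc_etherdev_mqs()
 * wrapper, but it can also be called directly.  A hypothetical allocation of
 * a device with eight TX and eight RX queues (struct foo_priv is assumed):
 *
 *	static struct net_device *foo_alloc(void)
 *	{
 *		return alloc_netdev_mqs(sizeof(struct foo_priv), "foo%d",
 *					NET_NAME_UNKNOWN, ether_setup, 8, 8);
 *	}
 */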
6699
6700 /**
6701  *      free_netdev - free network device
6702  *      @dev: device
6703  *
6704  *      This function does the last stage of destroying an allocated device
6705  *      interface. The reference to the device object is released.
6706  *      If this is the last reference then it will be freed.
6707  */
6708 void free_netdev(struct net_device *dev)
6709 {
6710         struct napi_struct *p, *n;
6711
6712         release_net(dev_net(dev));
6713
6714         netif_free_tx_queues(dev);
6715 #ifdef CONFIG_SYSFS
6716         kfree(dev->_rx);
6717 #endif
6718
6719         kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6720
6721         /* Flush device addresses */
6722         dev_addr_flush(dev);
6723
6724         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6725                 netif_napi_del(p);
6726
6727         free_percpu(dev->pcpu_refcnt);
6728         dev->pcpu_refcnt = NULL;
6729
6730         /* Compatibility with error handling in drivers */
6731         if (dev->reg_state == NETREG_UNINITIALIZED) {
6732                 netdev_freemem(dev);
6733                 return;
6734         }
6735
6736         BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6737         dev->reg_state = NETREG_RELEASED;
6738
6739         /* will free via device release */
6740         put_device(&dev->dev);
6741 }
6742 EXPORT_SYMBOL(free_netdev);
6743
6744 /**
6745  *      synchronize_net -  Synchronize with packet receive processing
6746  *
6747  *      Wait for packets currently being received to be done.
6748  *      Does not block later packets from starting.
6749  */
6750 void synchronize_net(void)
6751 {
6752         might_sleep();
6753         if (rtnl_is_locked())
6754                 synchronize_rcu_expedited();
6755         else
6756                 synchronize_rcu();
6757 }
6758 EXPORT_SYMBOL(synchronize_net);
6759
6760 /**
6761  *      unregister_netdevice_queue - remove device from the kernel
6762  *      @dev: device
6763  *      @head: list
6764  *
6765  *      This function shuts down a device interface and removes it
6766  *      from the kernel tables.
6767  *      If @head is not NULL, the device is queued to be unregistered later.
6768  *
6769  *      Callers must hold the rtnl semaphore.  You may want
6770  *      unregister_netdev() instead of this.
6771  */
6772
6773 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6774 {
6775         ASSERT_RTNL();
6776
6777         if (head) {
6778                 list_move_tail(&dev->unreg_list, head);
6779         } else {
6780                 rollback_registered(dev);
6781                 /* Finish processing unregister after unlock */
6782                 net_set_todo(dev);
6783         }
6784 }
6785 EXPORT_SYMBOL(unregister_netdevice_queue);
6786
6787 /**
6788  *      unregister_netdevice_many - unregister many devices
6789  *      @head: list of devices
6790  *
6791  *  Note: As most callers use a stack-allocated list_head,
6792  *  we force a list_del() to make sure the stack won't be corrupted later.
6793  */
6794 void unregister_netdevice_many(struct list_head *head)
6795 {
6796         struct net_device *dev;
6797
6798         if (!list_empty(head)) {
6799                 rollback_registered_many(head);
6800                 list_for_each_entry(dev, head, unreg_list)
6801                         net_set_todo(dev);
6802                 list_del(head);
6803         }
6804 }
6805 EXPORT_SYMBOL(unregister_netdevice_many);
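
/*
 * Example: batching several unregistrations under a single RTNL hold
 * amortizes the RCU barriers in rollback_registered_many().  A sketch over a
 * hypothetical driver-private list, assuming each foo_priv has a list_head
 * member "list" and a net_device pointer "dev":
 *
 *	static void foo_remove_all(struct list_head *foo_devs)
 *	{
 *		struct foo_priv *priv;
 *		LIST_HEAD(kill_list);
 *
 *		rtnl_lock();
 *		list_for_each_entry(priv, foo_devs, list)
 *			unregister_netdevice_queue(priv->dev, &kill_list);
 *		unregister_netdevice_many(&kill_list);
 *		rtnl_unlock();
 *	}
 */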
6806
6807 /**
6808  *      unregister_netdev - remove device from the kernel
6809  *      @dev: device
6810  *
6811  *      This function shuts down a device interface and removes it
6812  *      from the kernel tables.
6813  *
6814  *      This is just a wrapper for unregister_netdevice that takes
6815  *      the rtnl semaphore.  In general you want to use this and not
6816  *      unregister_netdevice.
6817  */
6818 void unregister_netdev(struct net_device *dev)
6819 {
6820         rtnl_lock();
6821         unregister_netdevice(dev);
6822         rtnl_unlock();
6823 }
6824 EXPORT_SYMBOL(unregister_netdev);
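
/*
 * Example: the usual remove-time counterpart of register_netdev(), with a
 * hypothetical foo_priv holding the device pointer.  free_netdev() must only
 * be called once the unregister has completed:
 *
 *	static void foo_remove(struct foo_priv *priv)
 *	{
 *		unregister_netdev(priv->dev);
 *		free_netdev(priv->dev);
 *	}
 */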
6825
6826 /**
6827  *      dev_change_net_namespace - move device to a different network namespace
6828  *      @dev: device
6829  *      @net: network namespace
6830  *      @pat: If not NULL, name pattern to try if the current device name
6831  *            is already taken in the destination network namespace.
6832  *
6833  *      This function shuts down a device interface and moves it
6834  *      to a new network namespace. On success 0 is returned, on
6835  *      a failure a negative errno code is returned.
6836  *
6837  *      Callers must hold the rtnl semaphore.
6838  */
6839
6840 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6841 {
6842         int err;
6843
6844         ASSERT_RTNL();
6845
6846         /* Don't allow namespace local devices to be moved. */
6847         err = -EINVAL;
6848         if (dev->features & NETIF_F_NETNS_LOCAL)
6849                 goto out;
6850
6851         /* Ensure the device has been registered */
6852         if (dev->reg_state != NETREG_REGISTERED)
6853                 goto out;
6854
6855         /* Get out if there is nothing to do */
6856         err = 0;
6857         if (net_eq(dev_net(dev), net))
6858                 goto out;
6859
6860         /* Pick the destination device name, and ensure
6861          * we can use it in the destination network namespace.
6862          */
6863         err = -EEXIST;
6864         if (__dev_get_by_name(net, dev->name)) {
6865                 /* We get here if we can't use the current device name */
6866                 if (!pat)
6867                         goto out;
6868                 if (dev_get_valid_name(net, dev, pat) < 0)
6869                         goto out;
6870         }
6871
6872         /*
6873          * And now a mini version of register_netdevice and unregister_netdevice.
6874          */
6875
6876         /* If the device is running, close it first. */
6877         dev_close(dev);
6878
6879         /* And unlink it from device chain */
6880         err = -ENODEV;
6881         unlist_netdevice(dev);
6882
6883         synchronize_net();
6884
6885         /* Shutdown queueing discipline. */
6886         dev_shutdown(dev);
6887
6888         /* Notify protocols that we are about to destroy
6889            this device. They should clean up all their state.
6890
6891            Note that dev->reg_state stays at NETREG_REGISTERED.
6892            This is wanted because this way 8021q and macvlan know
6893            the device is just moving and can keep their slaves up.
6894         */
6895         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6896         rcu_barrier();
6897         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6898         rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
6899
6900         /*
6901          *      Flush the unicast and multicast chains
6902          */
6903         dev_uc_flush(dev);
6904         dev_mc_flush(dev);
6905
6906         /* Send a netdev-removed uevent to the old namespace */
6907         kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6908         netdev_adjacent_del_links(dev);
6909
6910         /* Actually switch the network namespace */
6911         dev_net_set(dev, net);
6912
6913         /* If there is an ifindex conflict assign a new one */
6914         if (__dev_get_by_index(net, dev->ifindex)) {
6915                 int iflink = (dev->iflink == dev->ifindex);
6916                 dev->ifindex = dev_new_index(net);
6917                 if (iflink)
6918                         dev->iflink = dev->ifindex;
6919         }
6920
6921         /* Send a netdev-add uevent to the new namespace */
6922         kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6923         netdev_adjacent_add_links(dev);
6924
6925         /* Fixup kobjects */
6926         err = device_rename(&dev->dev, dev->name);
6927         WARN_ON(err);
6928
6929         /* Add the device back in the hashes */
6930         list_netdevice(dev);
6931
6932         /* Notify protocols that a new device appeared. */
6933         call_netdevice_notifiers(NETDEV_REGISTER, dev);
6934
6935         /*
6936          *      Prevent userspace races by waiting until the network
6937          *      device is fully set up before sending notifications.
6938          */
6939         rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6940
6941         synchronize_net();
6942         err = 0;
6943 out:
6944         return err;
6945 }
6946 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
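
/*
 * Example: moving a device into a namespace identified by an open
 * /proc/<pid>/ns/net file descriptor, roughly what the RTM_SETLINK
 * IFLA_NET_NS_FD path does.  A sketch with the foo_* name hypothetical and
 * "fd" assumed to come from user space:
 *
 *	static int foo_move_to_ns(struct net_device *dev, int fd)
 *	{
 *		struct net *net = get_net_ns_by_fd(fd);
 *		int err;
 *
 *		if (IS_ERR(net))
 *			return PTR_ERR(net);
 *
 *		rtnl_lock();
 *		err = dev_change_net_namespace(dev, net, "eth%d");
 *		rtnl_unlock();
 *		put_net(net);
 *		return err;
 *	}
 */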
6947
6948 static int dev_cpu_callback(struct notifier_block *nfb,
6949                             unsigned long action,
6950                             void *ocpu)
6951 {
6952         struct sk_buff **list_skb;
6953         struct sk_buff *skb;
6954         unsigned int cpu, oldcpu = (unsigned long)ocpu;
6955         struct softnet_data *sd, *oldsd;
6956
6957         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6958                 return NOTIFY_OK;
6959
6960         local_irq_disable();
6961         cpu = smp_processor_id();
6962         sd = &per_cpu(softnet_data, cpu);
6963         oldsd = &per_cpu(softnet_data, oldcpu);
6964
6965         /* Find end of our completion_queue. */
6966         list_skb = &sd->completion_queue;
6967         while (*list_skb)
6968                 list_skb = &(*list_skb)->next;
6969         /* Append completion queue from offline CPU. */
6970         *list_skb = oldsd->completion_queue;
6971         oldsd->completion_queue = NULL;
6972
6973         /* Append output queue from offline CPU. */
6974         if (oldsd->output_queue) {
6975                 *sd->output_queue_tailp = oldsd->output_queue;
6976                 sd->output_queue_tailp = oldsd->output_queue_tailp;
6977                 oldsd->output_queue = NULL;
6978                 oldsd->output_queue_tailp = &oldsd->output_queue;
6979         }
6980         /* Append NAPI poll list from offline CPU. */
6981         if (!list_empty(&oldsd->poll_list)) {
6982                 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6983                 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6984         }
6985
6986         raise_softirq_irqoff(NET_TX_SOFTIRQ);
6987         local_irq_enable();
6988
6989         /* Process offline CPU's input_pkt_queue */
6990         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6991                 netif_rx_internal(skb);
6992                 input_queue_head_incr(oldsd);
6993         }
6994         while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6995                 netif_rx_internal(skb);
6996                 input_queue_head_incr(oldsd);
6997         }
6998
6999         return NOTIFY_OK;
7000 }
7001
7002
7003 /**
7004  *      netdev_increment_features - increment feature set by one
7005  *      @all: current feature set
7006  *      @one: new feature set
7007  *      @mask: mask feature set
7008  *
7009  *      Computes a new feature set after adding a device with feature set
7010  *      @one to the master device with current feature set @all.  Will not
7011  *      enable anything that is off in @mask. Returns the new feature set.
7012  */
7013 netdev_features_t netdev_increment_features(netdev_features_t all,
7014         netdev_features_t one, netdev_features_t mask)
7015 {
7016         if (mask & NETIF_F_GEN_CSUM)
7017                 mask |= NETIF_F_ALL_CSUM;
7018         mask |= NETIF_F_VLAN_CHALLENGED;
7019
7020         all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7021         all &= one | ~NETIF_F_ALL_FOR_ALL;
7022
7023         /* If one device supports hw checksumming, set for all. */
7024         if (all & NETIF_F_GEN_CSUM)
7025                 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7026
7027         return all;
7028 }
7029 EXPORT_SYMBOL(netdev_increment_features);
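
/*
 * Example: aggregating drivers (bonding, team) fold each slave's feature set
 * into the master's from their feature-fixup hook.  A rough sketch over the
 * lower-device list, with foo_fix_features() standing in for the driver hook:
 *
 *	static netdev_features_t foo_fix_features(struct net_device *master,
 *						  netdev_features_t features)
 *	{
 *		netdev_features_t mask = features;
 *		struct net_device *lower;
 *		struct list_head *iter;
 *
 *		features &= ~NETIF_F_ONE_FOR_ALL;
 *		features |= NETIF_F_ALL_FOR_ALL;
 *
 *		netdev_for_each_lower_dev(master, lower, iter)
 *			features = netdev_increment_features(features,
 *							     lower->features,
 *							     mask);
 *		return features;
 *	}
 */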
7030
7031 static struct hlist_head * __net_init netdev_create_hash(void)
7032 {
7033         int i;
7034         struct hlist_head *hash;
7035
7036         hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7037         if (hash != NULL)
7038                 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7039                         INIT_HLIST_HEAD(&hash[i]);
7040
7041         return hash;
7042 }
7043
7044 /* Initialize per network namespace state */
7045 static int __net_init netdev_init(struct net *net)
7046 {
7047         if (net != &init_net)
7048                 INIT_LIST_HEAD(&net->dev_base_head);
7049
7050         net->dev_name_head = netdev_create_hash();
7051         if (net->dev_name_head == NULL)
7052                 goto err_name;
7053
7054         net->dev_index_head = netdev_create_hash();
7055         if (net->dev_index_head == NULL)
7056                 goto err_idx;
7057
7058         return 0;
7059
7060 err_idx:
7061         kfree(net->dev_name_head);
7062 err_name:
7063         return -ENOMEM;
7064 }
7065
7066 /**
7067  *      netdev_drivername - network driver for the device
7068  *      @dev: network device
7069  *
7070  *      Determine network driver for device.
7071  */
7072 const char *netdev_drivername(const struct net_device *dev)
7073 {
7074         const struct device_driver *driver;
7075         const struct device *parent;
7076         const char *empty = "";
7077
7078         parent = dev->dev.parent;
7079         if (!parent)
7080                 return empty;
7081
7082         driver = parent->driver;
7083         if (driver && driver->name)
7084                 return driver->name;
7085         return empty;
7086 }
7087
7088 static void __netdev_printk(const char *level, const struct net_device *dev,
7089                             struct va_format *vaf)
7090 {
7091         if (dev && dev->dev.parent) {
7092                 dev_printk_emit(level[1] - '0',
7093                                 dev->dev.parent,
7094                                 "%s %s %s%s: %pV",
7095                                 dev_driver_string(dev->dev.parent),
7096                                 dev_name(dev->dev.parent),
7097                                 netdev_name(dev), netdev_reg_state(dev),
7098                                 vaf);
7099         } else if (dev) {
7100                 printk("%s%s%s: %pV",
7101                        level, netdev_name(dev), netdev_reg_state(dev), vaf);
7102         } else {
7103                 printk("%s(NULL net_device): %pV", level, vaf);
7104         }
7105 }
7106
7107 void netdev_printk(const char *level, const struct net_device *dev,
7108                    const char *format, ...)
7109 {
7110         struct va_format vaf;
7111         va_list args;
7112
7113         va_start(args, format);
7114
7115         vaf.fmt = format;
7116         vaf.va = &args;
7117
7118         __netdev_printk(level, dev, &vaf);
7119
7120         va_end(args);
7121 }
7122 EXPORT_SYMBOL(netdev_printk);
7123
7124 #define define_netdev_printk_level(func, level)                 \
7125 void func(const struct net_device *dev, const char *fmt, ...)   \
7126 {                                                               \
7127         struct va_format vaf;                                   \
7128         va_list args;                                           \
7129                                                                 \
7130         va_start(args, fmt);                                    \
7131                                                                 \
7132         vaf.fmt = fmt;                                          \
7133         vaf.va = &args;                                         \
7134                                                                 \
7135         __netdev_printk(level, dev, &vaf);                      \
7136                                                                 \
7137         va_end(args);                                           \
7138 }                                                               \
7139 EXPORT_SYMBOL(func);
7140
7141 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7142 define_netdev_printk_level(netdev_alert, KERN_ALERT);
7143 define_netdev_printk_level(netdev_crit, KERN_CRIT);
7144 define_netdev_printk_level(netdev_err, KERN_ERR);
7145 define_netdev_printk_level(netdev_warn, KERN_WARNING);
7146 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7147 define_netdev_printk_level(netdev_info, KERN_INFO);
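
/*
 * Example: with these wrappers a driver can log without building its own
 * prefix; the interface name, driver and bus address are added automatically.
 * "new_mtu" below is just a stand-in value:
 *
 *	netdev_info(dev, "MTU changed to %d\n", new_mtu);
 *	netdev_warn(dev, "link lost, pausing transmit queue\n");
 */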
7148
7149 static void __net_exit netdev_exit(struct net *net)
7150 {
7151         kfree(net->dev_name_head);
7152         kfree(net->dev_index_head);
7153 }
7154
7155 static struct pernet_operations __net_initdata netdev_net_ops = {
7156         .init = netdev_init,
7157         .exit = netdev_exit,
7158 };
7159
7160 static void __net_exit default_device_exit(struct net *net)
7161 {
7162         struct net_device *dev, *aux;
7163         /*
7164          * Push all migratable network devices back to the
7165          * initial network namespace
7166          */
7167         rtnl_lock();
7168         for_each_netdev_safe(net, dev, aux) {
7169                 int err;
7170                 char fb_name[IFNAMSIZ];
7171
7172                 /* Ignore unmovable devices (e.g. loopback) */
7173                 if (dev->features & NETIF_F_NETNS_LOCAL)
7174                         continue;
7175
7176                 /* Leave virtual devices for the generic cleanup */
7177                 if (dev->rtnl_link_ops)
7178                         continue;
7179
7180                 /* Push remaining network devices to init_net */
7181                 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7182                 err = dev_change_net_namespace(dev, &init_net, fb_name);
7183                 if (err) {
7184                         pr_emerg("%s: failed to move %s to init_net: %d\n",
7185                                  __func__, dev->name, err);
7186                         BUG();
7187                 }
7188         }
7189         rtnl_unlock();
7190 }
7191
7192 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7193 {
7194         /* Return with the rtnl_lock held when there are no network
7195          * devices unregistering in any network namespace in net_list.
7196          */
7197         struct net *net;
7198         bool unregistering;
7199         DEFINE_WAIT(wait);
7200
7201         for (;;) {
7202                 prepare_to_wait(&netdev_unregistering_wq, &wait,
7203                                 TASK_UNINTERRUPTIBLE);
7204                 unregistering = false;
7205                 rtnl_lock();
7206                 list_for_each_entry(net, net_list, exit_list) {
7207                         if (net->dev_unreg_count > 0) {
7208                                 unregistering = true;
7209                                 break;
7210                         }
7211                 }
7212                 if (!unregistering)
7213                         break;
7214                 __rtnl_unlock();
7215                 schedule();
7216         }
7217         finish_wait(&netdev_unregistering_wq, &wait);
7218 }
7219
7220 static void __net_exit default_device_exit_batch(struct list_head *net_list)
7221 {
7222         /* At exit all network devices must be removed from a network
7223          * namespace.  Do this in the reverse order of registration.
7224          * Do this across as many network namespaces as possible to
7225          * improve batching efficiency.
7226          */
7227         struct net_device *dev;
7228         struct net *net;
7229         LIST_HEAD(dev_kill_list);
7230
7231         /* To prevent network device cleanup code from dereferencing
7232          * loopback devices or network devices that have been freed,
7233          * wait here for all pending unregistrations to complete
7234          * before unregistering the loopback device and allowing the
7235          * network namespace to be freed.
7236          *
7237          * The netdev todo list containing all network devices
7238          * unregistrations that happen in default_device_exit_batch
7239          * will run in the rtnl_unlock() at the end of
7240          * default_device_exit_batch.
7241          */
7242         rtnl_lock_unregistering(net_list);
7243         list_for_each_entry(net, net_list, exit_list) {
7244                 for_each_netdev_reverse(net, dev) {
7245                         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
7246                                 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7247                         else
7248                                 unregister_netdevice_queue(dev, &dev_kill_list);
7249                 }
7250         }
7251         unregister_netdevice_many(&dev_kill_list);
7252         rtnl_unlock();
7253 }
7254
7255 static struct pernet_operations __net_initdata default_device_ops = {
7256         .exit = default_device_exit,
7257         .exit_batch = default_device_exit_batch,
7258 };
7259
7260 /*
7261  *      Initialize the DEV module. At boot time this walks the device list and
7262  *      unhooks any devices that fail to initialise (normally hardware not
7263  *      present) and leaves us with a valid list of present and active devices.
7264  *
7265  */
7266
7267 /*
7268  *       This is called single threaded during boot, so no need
7269  *       This is called single-threaded during boot, so no need
7270  */
7271 static int __init net_dev_init(void)
7272 {
7273         int i, rc = -ENOMEM;
7274
7275         BUG_ON(!dev_boot_phase);
7276
7277         if (dev_proc_init())
7278                 goto out;
7279
7280         if (netdev_kobject_init())
7281                 goto out;
7282
7283         INIT_LIST_HEAD(&ptype_all);
7284         for (i = 0; i < PTYPE_HASH_SIZE; i++)
7285                 INIT_LIST_HEAD(&ptype_base[i]);
7286
7287         INIT_LIST_HEAD(&offload_base);
7288
7289         if (register_pernet_subsys(&netdev_net_ops))
7290                 goto out;
7291
7292         /*
7293          *      Initialise the packet receive queues.
7294          */
7295
7296         for_each_possible_cpu(i) {
7297                 struct softnet_data *sd = &per_cpu(softnet_data, i);
7298
7299                 skb_queue_head_init(&sd->input_pkt_queue);
7300                 skb_queue_head_init(&sd->process_queue);
7301                 INIT_LIST_HEAD(&sd->poll_list);
7302                 sd->output_queue_tailp = &sd->output_queue;
7303 #ifdef CONFIG_RPS
7304                 sd->csd.func = rps_trigger_softirq;
7305                 sd->csd.info = sd;
7306                 sd->cpu = i;
7307 #endif
7308
7309                 sd->backlog.poll = process_backlog;
7310                 sd->backlog.weight = weight_p;
7311         }
7312
7313         dev_boot_phase = 0;
7314
7315         /* The loopback device is special: if any other network device
7316          * is present in a network namespace, the loopback device must
7317          * be present too. Since we now dynamically allocate and free
7318          * the loopback device, ensure this invariant is maintained by
7319          * keeping the loopback device as the first device on the
7320          * list of network devices.  This ensures that the loopback
7321          * device is the first device that appears and the last network
7322          * device that disappears.
7323          */
7324         if (register_pernet_device(&loopback_net_ops))
7325                 goto out;
7326
7327         if (register_pernet_device(&default_device_ops))
7328                 goto out;
7329
7330         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7331         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
7332
7333         hotcpu_notifier(dev_cpu_callback, 0);
7334         dst_init();
7335         rc = 0;
7336 out:
7337         return rc;
7338 }
7339
7340 subsys_initcall(net_dev_init);