net/core/dev.c
1 /*
2  *      NET3    Protocol independent device support routines.
3  *
4  *              This program is free software; you can redistribute it and/or
5  *              modify it under the terms of the GNU General Public License
6  *              as published by the Free Software Foundation; either version
7  *              2 of the License, or (at your option) any later version.
8  *
9  *      Derived from the non IP parts of dev.c 1.0.19
10  *              Authors:        Ross Biro
11  *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12  *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
13  *
14  *      Additional Authors:
15  *              Florian la Roche <rzsfl@rz.uni-sb.de>
16  *              Alan Cox <gw4pts@gw4pts.ampr.org>
17  *              David Hinds <dahinds@users.sourceforge.net>
18  *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19  *              Adam Sulmicki <adam@cfar.umd.edu>
20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
21  *
22  *      Changes:
23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
24  *                                      to 2 if register_netdev gets called
25  *                                      before net_dev_init & also removed a
26  *                                      few lines of code in the process.
27  *              Alan Cox        :       device private ioctl copies fields back.
28  *              Alan Cox        :       Transmit queue code does relevant
29  *                                      stunts to keep the queue safe.
30  *              Alan Cox        :       Fixed double lock.
31  *              Alan Cox        :       Fixed promisc NULL pointer trap
32  *              ????????        :       Support the full private ioctl range
33  *              Alan Cox        :       Moved ioctl permission check into
34  *                                      drivers
35  *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
36  *              Alan Cox        :       100 backlog just doesn't cut it when
37  *                                      you start doing multicast video 8)
38  *              Alan Cox        :       Rewrote net_bh and list manager.
39  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
40  *              Alan Cox        :       Took out transmit every packet pass
41  *                                      Saved a few bytes in the ioctl handler
42  *              Alan Cox        :       Network driver sets packet type before
43  *                                      calling netif_rx. Saves a function
44  *                                      call a packet.
45  *              Alan Cox        :       Hashed net_bh()
46  *              Richard Kooijman:       Timestamp fixes.
47  *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
48  *              Alan Cox        :       Device lock protection.
49  *              Alan Cox        :       Fixed nasty side effect of device close
50  *                                      changes.
51  *              Rudi Cilibrasi  :       Pass the right thing to
52  *                                      set_mac_address()
53  *              Dave Miller     :       32bit quantity for the device lock to
54  *                                      make it work out on a Sparc.
55  *              Bjorn Ekwall    :       Added KERNELD hack.
56  *              Alan Cox        :       Cleaned up the backlog initialise.
57  *              Craig Metz      :       SIOCGIFCONF fix if space for under
58  *                                      1 device.
59  *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
60  *                                      is no device open function.
61  *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
62  *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
63  *              Cyrus Durgin    :       Cleaned for KMOD
64  *              Adam Sulmicki   :       Bug Fix : Network Device Unload
65  *                                      A network device unload needs to purge
66  *                                      the backlog queue.
67  *      Paul Rusty Russell      :       SIOCSIFNAME
68  *              Pekka Riikonen  :       Netdev boot-time settings code
69  *              Andrew Morton   :       Make unregister_netdevice wait
70  *                                      indefinitely on dev->refcnt
71  *              J Hadi Salim    :       - Backlog queue sampling
72  *                                      - netif_rx() feedback
73  */
74
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/stat.h>
101 #include <net/dst.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/module.h>
108 #include <linux/netpoll.h>
109 #include <linux/rcupdate.h>
110 #include <linux/delay.h>
111 #include <net/iw_handler.h>
112 #include <asm/current.h>
113 #include <linux/audit.h>
114 #include <linux/dmaengine.h>
115 #include <linux/err.h>
116 #include <linux/ctype.h>
117 #include <linux/if_arp.h>
118 #include <linux/if_vlan.h>
119 #include <linux/ip.h>
120 #include <net/ip.h>
121 #include <linux/ipv6.h>
122 #include <linux/in.h>
123 #include <linux/jhash.h>
124 #include <linux/random.h>
125 #include <trace/events/napi.h>
126 #include <trace/events/net.h>
127 #include <trace/events/skb.h>
128 #include <linux/pci.h>
129 #include <linux/inetdevice.h>
130 #include <linux/cpu_rmap.h>
131 #include <linux/static_key.h>
132 #include <linux/hashtable.h>
133 #include <linux/vmalloc.h>
134 #include <linux/if_macvlan.h>
135 #include <linux/errqueue.h>
136
137 #include "net-sysfs.h"
138
139 /* Instead of increasing this, you should create a hash table. */
140 #define MAX_GRO_SKBS 8
141
142 /* This should be increased if a protocol with a bigger head is added. */
143 #define GRO_MAX_HEAD (MAX_HEADER + 128)
144
145 static DEFINE_SPINLOCK(ptype_lock);
146 static DEFINE_SPINLOCK(offload_lock);
147 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
148 struct list_head ptype_all __read_mostly;       /* Taps */
149 static struct list_head offload_base __read_mostly;
150
151 static int netif_rx_internal(struct sk_buff *skb);
152 static int call_netdevice_notifiers_info(unsigned long val,
153                                          struct net_device *dev,
154                                          struct netdev_notifier_info *info);
155
156 /*
157  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
158  * semaphore.
159  *
160  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
161  *
162  * Writers must hold the rtnl semaphore while they loop through the
163  * dev_base_head list, and hold dev_base_lock for writing when they do the
164  * actual updates.  This allows pure readers to access the list even
165  * while a writer is preparing to update it.
166  *
167  * To put it another way, dev_base_lock is held for writing only to
168  * protect against pure readers; the rtnl semaphore provides the
169  * protection against other writers.
170  *
171  * See, for example usages, register_netdevice() and
172  * unregister_netdevice(), which must be called with the rtnl
173  * semaphore held.
174  */
175 DEFINE_RWLOCK(dev_base_lock);
176 EXPORT_SYMBOL(dev_base_lock);
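/*
 * Illustrative sketch (editor's addition, not part of dev.c): a pure
 * reader following the rules described above can walk the device list
 * under RCU, without taking dev_base_lock or the rtnl semaphore:
 *
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(&init_net, dev)
 *		pr_info("saw %s\n", dev->name);
 *	rcu_read_unlock();
 *
 * Pointers obtained this way are only valid inside the RCU read-side
 * critical section unless a reference is taken with dev_hold().
 */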
177
178 /* protects napi_hash addition/deletion and napi_gen_id */
179 static DEFINE_SPINLOCK(napi_hash_lock);
180
181 static unsigned int napi_gen_id;
182 static DEFINE_HASHTABLE(napi_hash, 8);
183
184 static seqcount_t devnet_rename_seq;
185
186 static inline void dev_base_seq_inc(struct net *net)
187 {
188         while (++net->dev_base_seq == 0);
189 }
190
191 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
192 {
193         unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
194
195         return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
196 }
197
198 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
199 {
200         return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
201 }
202
203 static inline void rps_lock(struct softnet_data *sd)
204 {
205 #ifdef CONFIG_RPS
206         spin_lock(&sd->input_pkt_queue.lock);
207 #endif
208 }
209
210 static inline void rps_unlock(struct softnet_data *sd)
211 {
212 #ifdef CONFIG_RPS
213         spin_unlock(&sd->input_pkt_queue.lock);
214 #endif
215 }
216
217 /* Device list insertion */
218 static void list_netdevice(struct net_device *dev)
219 {
220         struct net *net = dev_net(dev);
221
222         ASSERT_RTNL();
223
224         write_lock_bh(&dev_base_lock);
225         list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
226         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
227         hlist_add_head_rcu(&dev->index_hlist,
228                            dev_index_hash(net, dev->ifindex));
229         write_unlock_bh(&dev_base_lock);
230
231         dev_base_seq_inc(net);
232 }
233
234 /* Device list removal.
235  * The caller must respect an RCU grace period before freeing/reusing dev.
236  */
237 static void unlist_netdevice(struct net_device *dev)
238 {
239         ASSERT_RTNL();
240
241         /* Unlink dev from the device chain */
242         write_lock_bh(&dev_base_lock);
243         list_del_rcu(&dev->dev_list);
244         hlist_del_rcu(&dev->name_hlist);
245         hlist_del_rcu(&dev->index_hlist);
246         write_unlock_bh(&dev_base_lock);
247
248         dev_base_seq_inc(dev_net(dev));
249 }
250
251 /*
252  *      Our notifier list
253  */
254
255 static RAW_NOTIFIER_HEAD(netdev_chain);
256
257 /*
258  *      Device drivers call our routines to queue packets here. We empty the
259  *      queue in the local softnet handler.
260  */
261
262 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
263 EXPORT_PER_CPU_SYMBOL(softnet_data);
264
265 #ifdef CONFIG_LOCKDEP
266 /*
267  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
268  * according to dev->type
269  */
270 static const unsigned short netdev_lock_type[] =
271         {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
272          ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
273          ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
274          ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
275          ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
276          ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
277          ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
278          ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
279          ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
280          ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
281          ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
282          ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
283          ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
284          ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
285          ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
286
287 static const char *const netdev_lock_name[] =
288         {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
289          "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
290          "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
291          "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
292          "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
293          "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
294          "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
295          "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
296          "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
297          "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
298          "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
299          "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
300          "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
301          "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
302          "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
303
304 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
305 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
306
307 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
308 {
309         int i;
310
311         for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
312                 if (netdev_lock_type[i] == dev_type)
313                         return i;
314         /* the last key is used by default */
315         return ARRAY_SIZE(netdev_lock_type) - 1;
316 }
317
318 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
319                                                  unsigned short dev_type)
320 {
321         int i;
322
323         i = netdev_lock_pos(dev_type);
324         lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
325                                    netdev_lock_name[i]);
326 }
327
328 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
329 {
330         int i;
331
332         i = netdev_lock_pos(dev->type);
333         lockdep_set_class_and_name(&dev->addr_list_lock,
334                                    &netdev_addr_lock_key[i],
335                                    netdev_lock_name[i]);
336 }
337 #else
338 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
339                                                  unsigned short dev_type)
340 {
341 }
342 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
343 {
344 }
345 #endif
346
347 /*******************************************************************************
348
349                 Protocol management and registration routines
350
351 *******************************************************************************/
352
353 /*
354  *      Add a protocol ID to the list. Now that the input handler is
355  *      smarter we can dispense with all the messy stuff that used to be
356  *      here.
357  *
358  *      BEWARE!!! Protocol handlers that mangle input packets
359  *      MUST BE last in the hash buckets, and the walk over protocol
360  *      handlers MUST start from the promiscuous ptype_all chain in
361  *      net_bh. This holds today; do not change it.
362  *      Explanation follows: if a protocol handler that mangles packets
363  *      were first on the list, it could not tell that the packet is
364  *      cloned and should be copied-on-write, so it would modify the clone
365  *      and subsequent readers would get a broken packet.
366  *                                                      --ANK (980803)
367  */
368
369 static inline struct list_head *ptype_head(const struct packet_type *pt)
370 {
371         if (pt->type == htons(ETH_P_ALL))
372                 return &ptype_all;
373         else
374                 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
375 }
376
377 /**
378  *      dev_add_pack - add packet handler
379  *      @pt: packet type declaration
380  *
381  *      Add a protocol handler to the networking stack. The passed &packet_type
382  *      is linked into kernel lists and may not be freed until it has been
383  *      removed from the kernel lists.
384  *
385  *      This call does not sleep, therefore it cannot
386  *      guarantee that all CPUs in the middle of receiving packets
387  *      will see the new packet type (until the next received packet).
388  */
389
390 void dev_add_pack(struct packet_type *pt)
391 {
392         struct list_head *head = ptype_head(pt);
393
394         spin_lock(&ptype_lock);
395         list_add_rcu(&pt->list, head);
396         spin_unlock(&ptype_lock);
397 }
398 EXPORT_SYMBOL(dev_add_pack);
399
400 /**
401  *      __dev_remove_pack        - remove packet handler
402  *      @pt: packet type declaration
403  *
404  *      Remove a protocol handler that was previously added to the kernel
405  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
406  *      from the kernel lists and can be freed or reused once this function
407  *      returns.
408  *
409  *      The packet type might still be in use by receivers
410  *      and must not be freed until after all the CPUs have gone
411  *      through a quiescent state.
412  */
413 void __dev_remove_pack(struct packet_type *pt)
414 {
415         struct list_head *head = ptype_head(pt);
416         struct packet_type *pt1;
417
418         spin_lock(&ptype_lock);
419
420         list_for_each_entry(pt1, head, list) {
421                 if (pt == pt1) {
422                         list_del_rcu(&pt->list);
423                         goto out;
424                 }
425         }
426
427         pr_warn("dev_remove_pack: %p not found\n", pt);
428 out:
429         spin_unlock(&ptype_lock);
430 }
431 EXPORT_SYMBOL(__dev_remove_pack);
432
433 /**
434  *      dev_remove_pack  - remove packet handler
435  *      @pt: packet type declaration
436  *
437  *      Remove a protocol handler that was previously added to the kernel
438  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
439  *      from the kernel lists and can be freed or reused once this function
440  *      returns.
441  *
442  *      This call sleeps to guarantee that no CPU is looking at the packet
443  *      type after return.
444  */
445 void dev_remove_pack(struct packet_type *pt)
446 {
447         __dev_remove_pack(pt);
448
449         synchronize_net();
450 }
451 EXPORT_SYMBOL(dev_remove_pack);
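/*
 * Illustrative sketch (editor's addition, not part of dev.c): a module
 * could tap every incoming packet roughly like this, assuming a
 * hypothetical handler example_rcv() of its own:
 *
 *	static int example_rcv(struct sk_buff *skb, struct net_device *dev,
 *			       struct packet_type *pt,
 *			       struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type example_pt __read_mostly = {
 *		.type = htons(ETH_P_ALL),
 *		.func = example_rcv,
 *	};
 *
 *	dev_add_pack(&example_pt);	(module init)
 *	dev_remove_pack(&example_pt);	(module exit; may sleep)
 */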
452
453
454 /**
455  *      dev_add_offload - register offload handlers
456  *      @po: protocol offload declaration
457  *
458  *      Add protocol offload handlers to the networking stack. The passed
459  *      &proto_offload is linked into kernel lists and may not be freed until
460  *      it has been removed from the kernel lists.
461  *
462  *      This call does not sleep, therefore it cannot
463  *      guarantee that all CPUs in the middle of receiving packets
464  *      will see the new offload handlers (until the next received packet).
465  */
466 void dev_add_offload(struct packet_offload *po)
467 {
468         struct list_head *head = &offload_base;
469
470         spin_lock(&offload_lock);
471         list_add_rcu(&po->list, head);
472         spin_unlock(&offload_lock);
473 }
474 EXPORT_SYMBOL(dev_add_offload);
475
476 /**
477  *      __dev_remove_offload     - remove offload handler
478  *      @po: packet offload declaration
479  *
480  *      Remove a protocol offload handler that was previously added to the
481  *      kernel offload handlers by dev_add_offload(). The passed &offload_type
482  *      is removed from the kernel lists and can be freed or reused once this
483  *      function returns.
484  *
485  *      The packet type might still be in use by receivers
486  *      and must not be freed until after all the CPUs have gone
487  *      through a quiescent state.
488  */
489 static void __dev_remove_offload(struct packet_offload *po)
490 {
491         struct list_head *head = &offload_base;
492         struct packet_offload *po1;
493
494         spin_lock(&offload_lock);
495
496         list_for_each_entry(po1, head, list) {
497                 if (po == po1) {
498                         list_del_rcu(&po->list);
499                         goto out;
500                 }
501         }
502
503         pr_warn("dev_remove_offload: %p not found\n", po);
504 out:
505         spin_unlock(&offload_lock);
506 }
507
508 /**
509  *      dev_remove_offload       - remove packet offload handler
510  *      @po: packet offload declaration
511  *
512  *      Remove a packet offload handler that was previously added to the kernel
513  *      offload handlers by dev_add_offload(). The passed &offload_type is
514  *      removed from the kernel lists and can be freed or reused once this
515  *      function returns.
516  *
517  *      This call sleeps to guarantee that no CPU is looking at the packet
518  *      type after return.
519  */
520 void dev_remove_offload(struct packet_offload *po)
521 {
522         __dev_remove_offload(po);
523
524         synchronize_net();
525 }
526 EXPORT_SYMBOL(dev_remove_offload);
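/*
 * Illustrative sketch (editor's addition, not part of dev.c): offload
 * handlers are registered the same way; net/ipv4/af_inet.c, for
 * instance, registers something along these lines (callback names here
 * are only indicative):
 *
 *	static struct packet_offload ip_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = inet_gso_segment,
 *			.gro_receive = inet_gro_receive,
 *			.gro_complete = inet_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&ip_packet_offload);
 */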
527
528 /******************************************************************************
529
530                       Device Boot-time Settings Routines
531
532 *******************************************************************************/
533
534 /* Boot time configuration table */
535 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
536
537 /**
538  *      netdev_boot_setup_add   - add new setup entry
539  *      @name: name of the device
540  *      @map: configured settings for the device
541  *
542  *      Adds a new setup entry to the dev_boot_setup list.  The function
543  *      returns 0 on error and 1 on success.  This is a generic routine for
544  *      all netdevices.
545  */
546 static int netdev_boot_setup_add(char *name, struct ifmap *map)
547 {
548         struct netdev_boot_setup *s;
549         int i;
550
551         s = dev_boot_setup;
552         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
553                 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
554                         memset(s[i].name, 0, sizeof(s[i].name));
555                         strlcpy(s[i].name, name, IFNAMSIZ);
556                         memcpy(&s[i].map, map, sizeof(s[i].map));
557                         break;
558                 }
559         }
560
561         return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
562 }
563
564 /**
565  *      netdev_boot_setup_check - check boot time settings
566  *      @dev: the netdevice
567  *
568  *      Check boot time settings for the device.
569  *      Any settings found are applied to the device for use
570  *      later during device probing.
571  *      Returns 0 if no settings are found, 1 if they are.
572  */
573 int netdev_boot_setup_check(struct net_device *dev)
574 {
575         struct netdev_boot_setup *s = dev_boot_setup;
576         int i;
577
578         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
579                 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
580                     !strcmp(dev->name, s[i].name)) {
581                         dev->irq        = s[i].map.irq;
582                         dev->base_addr  = s[i].map.base_addr;
583                         dev->mem_start  = s[i].map.mem_start;
584                         dev->mem_end    = s[i].map.mem_end;
585                         return 1;
586                 }
587         }
588         return 0;
589 }
590 EXPORT_SYMBOL(netdev_boot_setup_check);
591
592
593 /**
594  *      netdev_boot_base        - get address from boot time settings
595  *      @prefix: prefix for network device
596  *      @unit: id for network device
597  *
598  *      Check boot time settings for the base address of the device.
599  *      Any settings found are applied to the device for use
600  *      later during device probing.
601  *      Returns 0 if no settings are found.
602  */
603 unsigned long netdev_boot_base(const char *prefix, int unit)
604 {
605         const struct netdev_boot_setup *s = dev_boot_setup;
606         char name[IFNAMSIZ];
607         int i;
608
609         sprintf(name, "%s%d", prefix, unit);
610
611         /*
612          * If the device is already registered then return a base of 1
613          * to indicate not to probe for this interface.
614          */
615         if (__dev_get_by_name(&init_net, name))
616                 return 1;
617
618         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
619                 if (!strcmp(name, s[i].name))
620                         return s[i].map.base_addr;
621         return 0;
622 }
623
624 /*
625  * Saves at boot time configured settings for any netdevice.
626  */
627 int __init netdev_boot_setup(char *str)
628 {
629         int ints[5];
630         struct ifmap map;
631
632         str = get_options(str, ARRAY_SIZE(ints), ints);
633         if (!str || !*str)
634                 return 0;
635
636         /* Save settings */
637         memset(&map, 0, sizeof(map));
638         if (ints[0] > 0)
639                 map.irq = ints[1];
640         if (ints[0] > 1)
641                 map.base_addr = ints[2];
642         if (ints[0] > 2)
643                 map.mem_start = ints[3];
644         if (ints[0] > 3)
645                 map.mem_end = ints[4];
646
647         /* Add new entry to the list */
648         return netdev_boot_setup_add(str, &map);
649 }
650
651 __setup("netdev=", netdev_boot_setup);
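/*
 * Illustrative sketch (editor's addition, not part of dev.c): given the
 * parsing above (up to four integers, then the remaining string as the
 * device name), a kernel command line entry might look like:
 *
 *	netdev=9,0x300,0,0,eth0
 *
 * which records irq 9 and I/O base 0x300 for "eth0" so that
 * netdev_boot_setup_check() can apply them when the device is probed.
 */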
652
653 /*******************************************************************************
654
655                             Device Interface Subroutines
656
657 *******************************************************************************/
658
659 /**
660  *      __dev_get_by_name       - find a device by its name
661  *      @net: the applicable net namespace
662  *      @name: name to find
663  *
664  *      Find an interface by name. Must be called under the RTNL semaphore
665  *      or @dev_base_lock. If the name is found, a pointer to the device
666  *      is returned; if the name is not found, %NULL is returned. The
667  *      reference counters are not incremented, so the caller must be
668  *      careful with locking.
669  */
670
671 struct net_device *__dev_get_by_name(struct net *net, const char *name)
672 {
673         struct net_device *dev;
674         struct hlist_head *head = dev_name_hash(net, name);
675
676         hlist_for_each_entry(dev, head, name_hlist)
677                 if (!strncmp(dev->name, name, IFNAMSIZ))
678                         return dev;
679
680         return NULL;
681 }
682 EXPORT_SYMBOL(__dev_get_by_name);
683
684 /**
685  *      dev_get_by_name_rcu     - find a device by its name
686  *      @net: the applicable net namespace
687  *      @name: name to find
688  *
689  *      Find an interface by name.
690  *      If the name is found, a pointer to the device is returned.
691  *      If the name is not found, %NULL is returned.
692  *      The reference counters are not incremented, so the caller must be
693  *      careful with locking. The caller must hold the RCU read lock.
694  */
695
696 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
697 {
698         struct net_device *dev;
699         struct hlist_head *head = dev_name_hash(net, name);
700
701         hlist_for_each_entry_rcu(dev, head, name_hlist)
702                 if (!strncmp(dev->name, name, IFNAMSIZ))
703                         return dev;
704
705         return NULL;
706 }
707 EXPORT_SYMBOL(dev_get_by_name_rcu);
708
709 /**
710  *      dev_get_by_name         - find a device by its name
711  *      @net: the applicable net namespace
712  *      @name: name to find
713  *
714  *      Find an interface by name. This can be called from any
715  *      context and does its own locking. The returned handle has
716  *      the usage count incremented and the caller must use dev_put() to
717  *      release it when it is no longer needed. %NULL is returned if no
718  *      matching device is found.
719  */
720
721 struct net_device *dev_get_by_name(struct net *net, const char *name)
722 {
723         struct net_device *dev;
724
725         rcu_read_lock();
726         dev = dev_get_by_name_rcu(net, name);
727         if (dev)
728                 dev_hold(dev);
729         rcu_read_unlock();
730         return dev;
731 }
732 EXPORT_SYMBOL(dev_get_by_name);
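/*
 * Illustrative sketch (editor's addition, not part of dev.c): the
 * refcounted variant is typically used from process context where no
 * lock is held, and must be balanced with dev_put():
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		... use dev ...
 *		dev_put(dev);
 *	}
 *
 * "eth0" here is just a placeholder name for the example.
 */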
733
734 /**
735  *      __dev_get_by_index - find a device by its ifindex
736  *      @net: the applicable net namespace
737  *      @ifindex: index of device
738  *
739  *      Search for an interface by index. Returns a pointer to the device,
740  *      or %NULL if the device is not found. The device has not
741  *      had its reference counter increased, so the caller must be careful
742  *      about locking. The caller must hold either the RTNL semaphore
743  *      or @dev_base_lock.
744  */
745
746 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
747 {
748         struct net_device *dev;
749         struct hlist_head *head = dev_index_hash(net, ifindex);
750
751         hlist_for_each_entry(dev, head, index_hlist)
752                 if (dev->ifindex == ifindex)
753                         return dev;
754
755         return NULL;
756 }
757 EXPORT_SYMBOL(__dev_get_by_index);
758
759 /**
760  *      dev_get_by_index_rcu - find a device by its ifindex
761  *      @net: the applicable net namespace
762  *      @ifindex: index of device
763  *
764  *      Search for an interface by index. Returns a pointer to the device,
765  *      or %NULL if the device is not found. The device has not
766  *      had its reference counter increased, so the caller must be careful
767  *      about locking. The caller must hold the RCU read lock.
768  */
769
770 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
771 {
772         struct net_device *dev;
773         struct hlist_head *head = dev_index_hash(net, ifindex);
774
775         hlist_for_each_entry_rcu(dev, head, index_hlist)
776                 if (dev->ifindex == ifindex)
777                         return dev;
778
779         return NULL;
780 }
781 EXPORT_SYMBOL(dev_get_by_index_rcu);
782
783
784 /**
785  *      dev_get_by_index - find a device by its ifindex
786  *      @net: the applicable net namespace
787  *      @ifindex: index of device
788  *
789  *      Search for an interface by index. Returns a pointer to the device,
790  *      or NULL if the device is not found. The device returned has
791  *      had a reference added, and the pointer is safe until the user calls
792  *      dev_put() to indicate they have finished with it.
793  */
794
795 struct net_device *dev_get_by_index(struct net *net, int ifindex)
796 {
797         struct net_device *dev;
798
799         rcu_read_lock();
800         dev = dev_get_by_index_rcu(net, ifindex);
801         if (dev)
802                 dev_hold(dev);
803         rcu_read_unlock();
804         return dev;
805 }
806 EXPORT_SYMBOL(dev_get_by_index);
807
808 /**
809  *      netdev_get_name - get a netdevice name, knowing its ifindex.
810  *      @net: network namespace
811  *      @name: a pointer to the buffer where the name will be stored.
812  *      @ifindex: the ifindex of the interface to get the name from.
813  *
814  *      The use of raw_seqcount_begin() and cond_resched() before
815  *      retrying is required as we want to give the writers a chance
816  *      to complete when CONFIG_PREEMPT is not set.
817  */
818 int netdev_get_name(struct net *net, char *name, int ifindex)
819 {
820         struct net_device *dev;
821         unsigned int seq;
822
823 retry:
824         seq = raw_seqcount_begin(&devnet_rename_seq);
825         rcu_read_lock();
826         dev = dev_get_by_index_rcu(net, ifindex);
827         if (!dev) {
828                 rcu_read_unlock();
829                 return -ENODEV;
830         }
831
832         strcpy(name, dev->name);
833         rcu_read_unlock();
834         if (read_seqcount_retry(&devnet_rename_seq, seq)) {
835                 cond_resched();
836                 goto retry;
837         }
838
839         return 0;
840 }
841
842 /**
843  *      dev_getbyhwaddr_rcu - find a device by its hardware address
844  *      @net: the applicable net namespace
845  *      @type: media type of device
846  *      @ha: hardware address
847  *
848  *      Search for an interface by MAC address. Returns a pointer to the
849  *      device, or NULL if the device is not found.
850  *      The caller must hold RCU or RTNL.
851  *      The returned device has not had its ref count increased,
852  *      and the caller must therefore be careful about locking.
853  *
854  */
855
856 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
857                                        const char *ha)
858 {
859         struct net_device *dev;
860
861         for_each_netdev_rcu(net, dev)
862                 if (dev->type == type &&
863                     !memcmp(dev->dev_addr, ha, dev->addr_len))
864                         return dev;
865
866         return NULL;
867 }
868 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
869
870 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
871 {
872         struct net_device *dev;
873
874         ASSERT_RTNL();
875         for_each_netdev(net, dev)
876                 if (dev->type == type)
877                         return dev;
878
879         return NULL;
880 }
881 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
882
883 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
884 {
885         struct net_device *dev, *ret = NULL;
886
887         rcu_read_lock();
888         for_each_netdev_rcu(net, dev)
889                 if (dev->type == type) {
890                         dev_hold(dev);
891                         ret = dev;
892                         break;
893                 }
894         rcu_read_unlock();
895         return ret;
896 }
897 EXPORT_SYMBOL(dev_getfirstbyhwtype);
898
899 /**
900  *      __dev_get_by_flags - find any device with given flags
901  *      @net: the applicable net namespace
902  *      @if_flags: IFF_* values
903  *      @mask: bitmask of bits in if_flags to check
904  *
905  *      Search for any interface with the given flags. Returns a pointer to
906  *      the first matching device, or NULL if none is found. Must be called
907  *      inside rtnl_lock(), and the result's refcount is unchanged.
908  */
909
910 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
911                                       unsigned short mask)
912 {
913         struct net_device *dev, *ret;
914
915         ASSERT_RTNL();
916
917         ret = NULL;
918         for_each_netdev(net, dev) {
919                 if (((dev->flags ^ if_flags) & mask) == 0) {
920                         ret = dev;
921                         break;
922                 }
923         }
924         return ret;
925 }
926 EXPORT_SYMBOL(__dev_get_by_flags);
927
928 /**
929  *      dev_valid_name - check if name is okay for network device
930  *      @name: name string
931  *
932  *      Network device names need to be valid file names
933  *      to allow sysfs to work.  We also disallow any kind of
934  *      whitespace.
935  */
936 bool dev_valid_name(const char *name)
937 {
938         if (*name == '\0')
939                 return false;
940         if (strlen(name) >= IFNAMSIZ)
941                 return false;
942         if (!strcmp(name, ".") || !strcmp(name, ".."))
943                 return false;
944
945         while (*name) {
946                 if (*name == '/' || isspace(*name))
947                         return false;
948                 name++;
949         }
950         return true;
951 }
952 EXPORT_SYMBOL(dev_valid_name);
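/*
 * Illustrative examples (editor's addition, not part of dev.c) of what
 * the checks above accept and reject:
 *
 *	dev_valid_name("eth0")          -> true
 *	dev_valid_name("")              -> false  (empty)
 *	dev_valid_name(".")             -> false  (reserved)
 *	dev_valid_name("a/b")           -> false  ('/' not allowed)
 *	dev_valid_name("my if")         -> false  (whitespace)
 *	any name of >= IFNAMSIZ chars   -> false  (too long)
 */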
953
954 /**
955  *      __dev_alloc_name - allocate a name for a device
956  *      @net: network namespace to allocate the device name in
957  *      @name: name format string
958  *      @buf:  scratch buffer and result name string
959  *
960  *      Passed a format string - eg "lt%d" - it will try and find a suitable
961  *      id. It scans the list of devices to build up a free map, then chooses
962  *      the first empty slot. The caller must hold the dev_base or rtnl lock
963  *      while allocating the name and adding the device in order to avoid
964  *      duplicates.
965  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
966  *      Returns the number of the unit assigned or a negative errno code.
967  */
968
969 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
970 {
971         int i = 0;
972         const char *p;
973         const int max_netdevices = 8*PAGE_SIZE;
974         unsigned long *inuse;
975         struct net_device *d;
976
977         p = strnchr(name, IFNAMSIZ-1, '%');
978         if (p) {
979                 /*
980                  * Verify the string as this thing may have come from
981                  * the user.  There must be either one "%d" and no other "%"
982                  * characters.
983                  */
984                 if (p[1] != 'd' || strchr(p + 2, '%'))
985                         return -EINVAL;
986
987                 /* Use one page as a bit array of possible slots */
988                 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
989                 if (!inuse)
990                         return -ENOMEM;
991
992                 for_each_netdev(net, d) {
993                         if (!sscanf(d->name, name, &i))
994                                 continue;
995                         if (i < 0 || i >= max_netdevices)
996                                 continue;
997
998                         /*  avoid cases where sscanf is not exact inverse of printf */
999                         snprintf(buf, IFNAMSIZ, name, i);
1000                         if (!strncmp(buf, d->name, IFNAMSIZ))
1001                                 set_bit(i, inuse);
1002                 }
1003
1004                 i = find_first_zero_bit(inuse, max_netdevices);
1005                 free_page((unsigned long) inuse);
1006         }
1007
1008         if (buf != name)
1009                 snprintf(buf, IFNAMSIZ, name, i);
1010         if (!__dev_get_by_name(net, buf))
1011                 return i;
1012
1013         /* It is possible to run out of possible slots
1014          * when the name is long and there isn't enough space left
1015          * for the digits, or if all bits are used.
1016          */
1017         return -ENFILE;
1018 }
1019
1020 /**
1021  *      dev_alloc_name - allocate a name for a device
1022  *      @dev: device
1023  *      @name: name format string
1024  *
1025  *      Passed a format string - eg "lt%d" - it will try and find a suitable
1026  *      id. It scans the list of devices to build up a free map, then chooses
1027  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1028  *      while allocating the name and adding the device in order to avoid
1029  *      duplicates.
1030  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1031  *      Returns the number of the unit assigned or a negative errno code.
1032  */
1033
1034 int dev_alloc_name(struct net_device *dev, const char *name)
1035 {
1036         char buf[IFNAMSIZ];
1037         struct net *net;
1038         int ret;
1039
1040         BUG_ON(!dev_net(dev));
1041         net = dev_net(dev);
1042         ret = __dev_alloc_name(net, name, buf);
1043         if (ret >= 0)
1044                 strlcpy(dev->name, buf, IFNAMSIZ);
1045         return ret;
1046 }
1047 EXPORT_SYMBOL(dev_alloc_name);
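/*
 * Illustrative sketch (editor's addition, not part of dev.c): a driver
 * that wants automatic numbering passes a format string, e.g.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *
 * which picks the lowest free unit ("dummy0", "dummy1", ...) in dev's
 * network namespace and copies the result into dev->name.
 */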
1048
1049 static int dev_alloc_name_ns(struct net *net,
1050                              struct net_device *dev,
1051                              const char *name)
1052 {
1053         char buf[IFNAMSIZ];
1054         int ret;
1055
1056         ret = __dev_alloc_name(net, name, buf);
1057         if (ret >= 0)
1058                 strlcpy(dev->name, buf, IFNAMSIZ);
1059         return ret;
1060 }
1061
1062 static int dev_get_valid_name(struct net *net,
1063                               struct net_device *dev,
1064                               const char *name)
1065 {
1066         BUG_ON(!net);
1067
1068         if (!dev_valid_name(name))
1069                 return -EINVAL;
1070
1071         if (strchr(name, '%'))
1072                 return dev_alloc_name_ns(net, dev, name);
1073         else if (__dev_get_by_name(net, name))
1074                 return -EEXIST;
1075         else if (dev->name != name)
1076                 strlcpy(dev->name, name, IFNAMSIZ);
1077
1078         return 0;
1079 }
1080
1081 /**
1082  *      dev_change_name - change name of a device
1083  *      @dev: device
1084  *      @newname: name (or format string) must be at least IFNAMSIZ
1085  *
1086  *      Change the name of a device. A format string such as "eth%d"
1087  *      can be passed for wildcarding.
1088  */
1089 int dev_change_name(struct net_device *dev, const char *newname)
1090 {
1091         unsigned char old_assign_type;
1092         char oldname[IFNAMSIZ];
1093         int err = 0;
1094         int ret;
1095         struct net *net;
1096
1097         ASSERT_RTNL();
1098         BUG_ON(!dev_net(dev));
1099
1100         net = dev_net(dev);
1101         if (dev->flags & IFF_UP)
1102                 return -EBUSY;
1103
1104         write_seqcount_begin(&devnet_rename_seq);
1105
1106         if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1107                 write_seqcount_end(&devnet_rename_seq);
1108                 return 0;
1109         }
1110
1111         memcpy(oldname, dev->name, IFNAMSIZ);
1112
1113         err = dev_get_valid_name(net, dev, newname);
1114         if (err < 0) {
1115                 write_seqcount_end(&devnet_rename_seq);
1116                 return err;
1117         }
1118
1119         if (oldname[0] && !strchr(oldname, '%'))
1120                 netdev_info(dev, "renamed from %s\n", oldname);
1121
1122         old_assign_type = dev->name_assign_type;
1123         dev->name_assign_type = NET_NAME_RENAMED;
1124
1125 rollback:
1126         ret = device_rename(&dev->dev, dev->name);
1127         if (ret) {
1128                 memcpy(dev->name, oldname, IFNAMSIZ);
1129                 dev->name_assign_type = old_assign_type;
1130                 write_seqcount_end(&devnet_rename_seq);
1131                 return ret;
1132         }
1133
1134         write_seqcount_end(&devnet_rename_seq);
1135
1136         netdev_adjacent_rename_links(dev, oldname);
1137
1138         write_lock_bh(&dev_base_lock);
1139         hlist_del_rcu(&dev->name_hlist);
1140         write_unlock_bh(&dev_base_lock);
1141
1142         synchronize_rcu();
1143
1144         write_lock_bh(&dev_base_lock);
1145         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1146         write_unlock_bh(&dev_base_lock);
1147
1148         ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1149         ret = notifier_to_errno(ret);
1150
1151         if (ret) {
1152                 /* err >= 0 after dev_alloc_name() or stores the first errno */
1153                 if (err >= 0) {
1154                         err = ret;
1155                         write_seqcount_begin(&devnet_rename_seq);
1156                         memcpy(dev->name, oldname, IFNAMSIZ);
1157                         memcpy(oldname, newname, IFNAMSIZ);
1158                         dev->name_assign_type = old_assign_type;
1159                         old_assign_type = NET_NAME_RENAMED;
1160                         goto rollback;
1161                 } else {
1162                         pr_err("%s: name change rollback failed: %d\n",
1163                                dev->name, ret);
1164                 }
1165         }
1166
1167         return err;
1168 }
1169
1170 /**
1171  *      dev_set_alias - change ifalias of a device
1172  *      @dev: device
1173  *      @alias: name up to IFALIASZ
1174  *      @len: limit of bytes to copy from @alias
1175  *
1176  *      Set the ifalias for a device.
1177  */
1178 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1179 {
1180         char *new_ifalias;
1181
1182         ASSERT_RTNL();
1183
1184         if (len >= IFALIASZ)
1185                 return -EINVAL;
1186
1187         if (!len) {
1188                 kfree(dev->ifalias);
1189                 dev->ifalias = NULL;
1190                 return 0;
1191         }
1192
1193         new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1194         if (!new_ifalias)
1195                 return -ENOMEM;
1196         dev->ifalias = new_ifalias;
1197
1198         strlcpy(dev->ifalias, alias, len+1);
1199         return len;
1200 }
1201
1202
1203 /**
1204  *      netdev_features_change - device changes features
1205  *      @dev: device to cause notification
1206  *
1207  *      Called to indicate a device has changed features.
1208  */
1209 void netdev_features_change(struct net_device *dev)
1210 {
1211         call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1212 }
1213 EXPORT_SYMBOL(netdev_features_change);
1214
1215 /**
1216  *      netdev_state_change - device changes state
1217  *      @dev: device to cause notification
1218  *
1219  *      Called to indicate a device has changed state. This function calls
1220  *      the notifier chains for netdev_chain and sends a NEWLINK message
1221  *      to the routing socket.
1222  */
1223 void netdev_state_change(struct net_device *dev)
1224 {
1225         if (dev->flags & IFF_UP) {
1226                 struct netdev_notifier_change_info change_info;
1227
1228                 change_info.flags_changed = 0;
1229                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1230                                               &change_info.info);
1231                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1232         }
1233 }
1234 EXPORT_SYMBOL(netdev_state_change);
1235
1236 /**
1237  *      netdev_notify_peers - notify network peers about existence of @dev
1238  *      @dev: network device
1239  *
1240  * Generate traffic such that interested network peers are aware of
1241  * @dev, such as by generating a gratuitous ARP. This may be used when
1242  * a device wants to inform the rest of the network about some sort of
1243  * reconfiguration such as a failover event or virtual machine
1244  * migration.
1245  */
1246 void netdev_notify_peers(struct net_device *dev)
1247 {
1248         rtnl_lock();
1249         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1250         rtnl_unlock();
1251 }
1252 EXPORT_SYMBOL(netdev_notify_peers);
1253
1254 static int __dev_open(struct net_device *dev)
1255 {
1256         const struct net_device_ops *ops = dev->netdev_ops;
1257         int ret;
1258
1259         ASSERT_RTNL();
1260
1261         if (!netif_device_present(dev))
1262                 return -ENODEV;
1263
1264         /* Block netpoll from trying to do any rx path servicing.
1265          * If we don't do this, there is a chance that ndo_poll_controller
1266          * or ndo_poll may be running while we open the device.
1267          */
1268         netpoll_poll_disable(dev);
1269
1270         ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1271         ret = notifier_to_errno(ret);
1272         if (ret)
1273                 return ret;
1274
1275         set_bit(__LINK_STATE_START, &dev->state);
1276
1277         if (ops->ndo_validate_addr)
1278                 ret = ops->ndo_validate_addr(dev);
1279
1280         if (!ret && ops->ndo_open)
1281                 ret = ops->ndo_open(dev);
1282
1283         netpoll_poll_enable(dev);
1284
1285         if (ret)
1286                 clear_bit(__LINK_STATE_START, &dev->state);
1287         else {
1288                 dev->flags |= IFF_UP;
1289                 dev_set_rx_mode(dev);
1290                 dev_activate(dev);
1291                 add_device_randomness(dev->dev_addr, dev->addr_len);
1292         }
1293
1294         return ret;
1295 }
1296
1297 /**
1298  *      dev_open        - prepare an interface for use.
1299  *      @dev:   device to open
1300  *
1301  *      Takes a device from down to up state. The device's private open
1302  *      function is invoked and then the multicast lists are loaded. Finally
1303  *      the device is moved into the up state and a %NETDEV_UP message is
1304  *      sent to the netdev notifier chain.
1305  *
1306  *      Calling this function on an active interface is a nop. On a failure
1307  *      a negative errno code is returned.
1308  */
1309 int dev_open(struct net_device *dev)
1310 {
1311         int ret;
1312
1313         if (dev->flags & IFF_UP)
1314                 return 0;
1315
1316         ret = __dev_open(dev);
1317         if (ret < 0)
1318                 return ret;
1319
1320         rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1321         call_netdevice_notifiers(NETDEV_UP, dev);
1322
1323         return ret;
1324 }
1325 EXPORT_SYMBOL(dev_open);
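/*
 * Illustrative sketch (editor's addition, not part of dev.c): both
 * dev_open() and dev_close() expect the rtnl semaphore to be held, so
 * in-kernel callers typically wrap them like this ("eth0" is only a
 * placeholder, and err is assumed to be declared by the caller):
 *
 *	struct net_device *dev;
 *
 *	rtnl_lock();
 *	dev = __dev_get_by_name(&init_net, "eth0");
 *	if (dev)
 *		err = dev_open(dev);
 *	rtnl_unlock();
 */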
1326
1327 static int __dev_close_many(struct list_head *head)
1328 {
1329         struct net_device *dev;
1330
1331         ASSERT_RTNL();
1332         might_sleep();
1333
1334         list_for_each_entry(dev, head, close_list) {
1335                 /* Temporarily disable netpoll until the interface is down */
1336                 netpoll_poll_disable(dev);
1337
1338                 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1339
1340                 clear_bit(__LINK_STATE_START, &dev->state);
1341
1342                 /* Synchronize to the scheduled poll. We cannot touch the poll
1343                  * list; it may even be on a different CPU. So just clear netif_running().
1344                  *
1345                  * dev->stop() will invoke napi_disable() on all of its
1346                  * napi_struct instances on this device.
1347                  */
1348                 smp_mb__after_atomic(); /* Commit netif_running(). */
1349         }
1350
1351         dev_deactivate_many(head);
1352
1353         list_for_each_entry(dev, head, close_list) {
1354                 const struct net_device_ops *ops = dev->netdev_ops;
1355
1356                 /*
1357                  *      Call the device-specific close. This cannot fail and is
1358                  *      only done if the device is UP.
1359                  *
1360                  *      We allow it to be called even after a DETACH hot-plug
1361                  *      event.
1362                  */
1363                 if (ops->ndo_stop)
1364                         ops->ndo_stop(dev);
1365
1366                 dev->flags &= ~IFF_UP;
1367                 netpoll_poll_enable(dev);
1368         }
1369
1370         return 0;
1371 }
1372
1373 static int __dev_close(struct net_device *dev)
1374 {
1375         int retval;
1376         LIST_HEAD(single);
1377
1378         list_add(&dev->close_list, &single);
1379         retval = __dev_close_many(&single);
1380         list_del(&single);
1381
1382         return retval;
1383 }
1384
1385 static int dev_close_many(struct list_head *head)
1386 {
1387         struct net_device *dev, *tmp;
1388
1389         /* Remove the devices that don't need to be closed */
1390         list_for_each_entry_safe(dev, tmp, head, close_list)
1391                 if (!(dev->flags & IFF_UP))
1392                         list_del_init(&dev->close_list);
1393
1394         __dev_close_many(head);
1395
1396         list_for_each_entry_safe(dev, tmp, head, close_list) {
1397                 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1398                 call_netdevice_notifiers(NETDEV_DOWN, dev);
1399                 list_del_init(&dev->close_list);
1400         }
1401
1402         return 0;
1403 }
1404
1405 /**
1406  *      dev_close - shutdown an interface.
1407  *      @dev: device to shutdown
1408  *
1409  *      This function moves an active device into down state. A
1410  *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1411  *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1412  *      chain.
1413  */
1414 int dev_close(struct net_device *dev)
1415 {
1416         if (dev->flags & IFF_UP) {
1417                 LIST_HEAD(single);
1418
1419                 list_add(&dev->close_list, &single);
1420                 dev_close_many(&single);
1421                 list_del(&single);
1422         }
1423         return 0;
1424 }
1425 EXPORT_SYMBOL(dev_close);
1426
1427
1428 /**
1429  *      dev_disable_lro - disable Large Receive Offload on a device
1430  *      @dev: device
1431  *
1432  *      Disable Large Receive Offload (LRO) on a net device.  Must be
1433  *      called under RTNL.  This is needed if received packets may be
1434  *      forwarded to another interface.
1435  */
1436 void dev_disable_lro(struct net_device *dev)
1437 {
1438         /*
1439          * If we're trying to disable LRO on a vlan device,
1440          * use the underlying physical device instead.
1441          */
1442         if (is_vlan_dev(dev))
1443                 dev = vlan_dev_real_dev(dev);
1444
1445         /* the same for macvlan devices */
1446         if (netif_is_macvlan(dev))
1447                 dev = macvlan_dev_real_dev(dev);
1448
1449         dev->wanted_features &= ~NETIF_F_LRO;
1450         netdev_update_features(dev);
1451
1452         if (unlikely(dev->features & NETIF_F_LRO))
1453                 netdev_WARN(dev, "failed to disable LRO!\n");
1454 }
1455 EXPORT_SYMBOL(dev_disable_lro);
1456
1457 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1458                                    struct net_device *dev)
1459 {
1460         struct netdev_notifier_info info;
1461
1462         netdev_notifier_info_init(&info, dev);
1463         return nb->notifier_call(nb, val, &info);
1464 }
1465
1466 static int dev_boot_phase = 1;
1467
1468 /**
1469  *      register_netdevice_notifier - register a network notifier block
1470  *      @nb: notifier
1471  *
1472  *      Register a notifier to be called when network device events occur.
1473  *      The notifier passed is linked into the kernel structures and must
1474  *      not be reused until it has been unregistered. A negative errno code
1475  *      is returned on a failure.
1476  *
1477  *      When registered, all registration and up events are replayed
1478  *      to the new notifier to give it a race-free
1479  *      view of the network device list.
1480  */
1481
1482 int register_netdevice_notifier(struct notifier_block *nb)
1483 {
1484         struct net_device *dev;
1485         struct net_device *last;
1486         struct net *net;
1487         int err;
1488
1489         rtnl_lock();
1490         err = raw_notifier_chain_register(&netdev_chain, nb);
1491         if (err)
1492                 goto unlock;
1493         if (dev_boot_phase)
1494                 goto unlock;
1495         for_each_net(net) {
1496                 for_each_netdev(net, dev) {
1497                         err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1498                         err = notifier_to_errno(err);
1499                         if (err)
1500                                 goto rollback;
1501
1502                         if (!(dev->flags & IFF_UP))
1503                                 continue;
1504
1505                         call_netdevice_notifier(nb, NETDEV_UP, dev);
1506                 }
1507         }
1508
1509 unlock:
1510         rtnl_unlock();
1511         return err;
1512
1513 rollback:
1514         last = dev;
1515         for_each_net(net) {
1516                 for_each_netdev(net, dev) {
1517                         if (dev == last)
1518                                 goto outroll;
1519
1520                         if (dev->flags & IFF_UP) {
1521                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1522                                                         dev);
1523                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1524                         }
1525                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1526                 }
1527         }
1528
1529 outroll:
1530         raw_notifier_chain_unregister(&netdev_chain, nb);
1531         goto unlock;
1532 }
1533 EXPORT_SYMBOL(register_netdevice_notifier);
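/*
 * Illustrative sketch (editor's addition, not part of dev.c): a typical
 * subscriber defines a notifier_block and inspects the event and device
 * in its callback (the names below are hypothetical):
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_netdev_notifier = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_netdev_notifier);
 */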
1534
1535 /**
1536  *      unregister_netdevice_notifier - unregister a network notifier block
1537  *      @nb: notifier
1538  *
1539  *      Unregister a notifier previously registered by
1540  *      register_netdevice_notifier(). The notifier is unlinked into the
1541  *      kernel structures and may then be reused. A negative errno code
1542  *      is returned on a failure.
1543  *
1544  *      After unregistering, unregister and down device events are synthesized
1545  *      for all devices on the device list and delivered to the removed notifier,
1546  *      removing the need for special-case cleanup code.
1547  */
1548
1549 int unregister_netdevice_notifier(struct notifier_block *nb)
1550 {
1551         struct net_device *dev;
1552         struct net *net;
1553         int err;
1554
1555         rtnl_lock();
1556         err = raw_notifier_chain_unregister(&netdev_chain, nb);
1557         if (err)
1558                 goto unlock;
1559
1560         for_each_net(net) {
1561                 for_each_netdev(net, dev) {
1562                         if (dev->flags & IFF_UP) {
1563                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1564                                                         dev);
1565                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1566                         }
1567                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1568                 }
1569         }
1570 unlock:
1571         rtnl_unlock();
1572         return err;
1573 }
1574 EXPORT_SYMBOL(unregister_netdevice_notifier);
1575
1576 /**
1577  *      call_netdevice_notifiers_info - call all network notifier blocks
1578  *      @val: value passed unmodified to notifier function
1579  *      @dev: net_device pointer passed unmodified to notifier function
1580  *      @info: notifier information data
1581  *
1582  *      Call all network notifier blocks.  Parameters and return value
1583  *      are as for raw_notifier_call_chain().
1584  */
1585
1586 static int call_netdevice_notifiers_info(unsigned long val,
1587                                          struct net_device *dev,
1588                                          struct netdev_notifier_info *info)
1589 {
1590         ASSERT_RTNL();
1591         netdev_notifier_info_init(info, dev);
1592         return raw_notifier_call_chain(&netdev_chain, val, info);
1593 }
1594
1595 /**
1596  *      call_netdevice_notifiers - call all network notifier blocks
1597  *      @val: value passed unmodified to notifier function
1598  *      @dev: net_device pointer passed unmodified to notifier function
1599  *
1600  *      Call all network notifier blocks.  Parameters and return value
1601  *      are as for raw_notifier_call_chain().
1602  */
1603
1604 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1605 {
1606         struct netdev_notifier_info info;
1607
1608         return call_netdevice_notifiers_info(val, dev, &info);
1609 }
1610 EXPORT_SYMBOL(call_netdevice_notifiers);
1611
1612 static struct static_key netstamp_needed __read_mostly;
1613 #ifdef HAVE_JUMP_LABEL
1614 /* We are not allowed to call static_key_slow_dec() from irq context
1615  * If net_disable_timestamp() is called from irq context, defer the
1616  * static_key_slow_dec() calls.
1617  */
1618 static atomic_t netstamp_needed_deferred;
1619 #endif
1620
1621 void net_enable_timestamp(void)
1622 {
1623 #ifdef HAVE_JUMP_LABEL
1624         int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1625
1626         if (deferred) {
1627                 while (--deferred)
1628                         static_key_slow_dec(&netstamp_needed);
1629                 return;
1630         }
1631 #endif
1632         static_key_slow_inc(&netstamp_needed);
1633 }
1634 EXPORT_SYMBOL(net_enable_timestamp);
1635
1636 void net_disable_timestamp(void)
1637 {
1638 #ifdef HAVE_JUMP_LABEL
1639         if (in_interrupt()) {
1640                 atomic_inc(&netstamp_needed_deferred);
1641                 return;
1642         }
1643 #endif
1644         static_key_slow_dec(&netstamp_needed);
1645 }
1646 EXPORT_SYMBOL(net_disable_timestamp);
1647
1648 static inline void net_timestamp_set(struct sk_buff *skb)
1649 {
1650         skb->tstamp.tv64 = 0;
1651         if (static_key_false(&netstamp_needed))
1652                 __net_timestamp(skb);
1653 }
1654
1655 #define net_timestamp_check(COND, SKB)                  \
1656         if (static_key_false(&netstamp_needed)) {               \
1657                 if ((COND) && !(SKB)->tstamp.tv64)      \
1658                         __net_timestamp(SKB);           \
1659         }                                               \
1660
1661 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1662 {
1663         unsigned int len;
1664
1665         if (!(dev->flags & IFF_UP))
1666                 return false;
1667
1668         len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1669         if (skb->len <= len)
1670                 return true;
1671
1672         /* if TSO is enabled, we don't care about the length as the packet
1673          * could be forwarded without being segmented first
1674          */
1675         if (skb_is_gso(skb))
1676                 return true;
1677
1678         return false;
1679 }
1680 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1681
1682 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1683 {
1684         if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1685                 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1686                         atomic_long_inc(&dev->rx_dropped);
1687                         kfree_skb(skb);
1688                         return NET_RX_DROP;
1689                 }
1690         }
1691
1692         if (unlikely(!is_skb_forwardable(dev, skb))) {
1693                 atomic_long_inc(&dev->rx_dropped);
1694                 kfree_skb(skb);
1695                 return NET_RX_DROP;
1696         }
1697
1698         skb_scrub_packet(skb, true);
1699         skb->protocol = eth_type_trans(skb, dev);
1700
1701         return 0;
1702 }
1703 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1704
1705 /**
1706  * dev_forward_skb - loopback an skb to another netif
1707  *
1708  * @dev: destination network device
1709  * @skb: buffer to forward
1710  *
1711  * return values:
1712  *      NET_RX_SUCCESS  (no congestion)
1713  *      NET_RX_DROP     (packet was dropped, but freed)
1714  *
1715  * dev_forward_skb can be used for injecting an skb from the
1716  * start_xmit function of one device into the receive queue
1717  * of another device.
1718  *
1719  * The receiving device may be in another namespace, so
1720  * we have to clear all information in the skb that could
1721  * impact namespace isolation.
1722  */
1723 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1724 {
1725         return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1726 }
1727 EXPORT_SYMBOL_GPL(dev_forward_skb);
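/*
 * Usage sketch: a veth-style driver could hand frames from its own
 * ndo_start_xmit over to a paired device with dev_forward_skb().  The
 * my_priv structure and its peer pointer are hypothetical.
 */
#if 0
struct my_priv {
        struct net_device *peer;
};

static netdev_tx_t my_pair_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct my_priv *priv = netdev_priv(dev);

        if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
                dev->stats.tx_dropped++;

        return NETDEV_TX_OK;
}
#endif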
1728
1729 static inline int deliver_skb(struct sk_buff *skb,
1730                               struct packet_type *pt_prev,
1731                               struct net_device *orig_dev)
1732 {
1733         if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1734                 return -ENOMEM;
1735         atomic_inc(&skb->users);
1736         return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1737 }
1738
1739 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1740 {
1741         if (!ptype->af_packet_priv || !skb->sk)
1742                 return false;
1743
1744         if (ptype->id_match)
1745                 return ptype->id_match(ptype, skb->sk);
1746         else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1747                 return true;
1748
1749         return false;
1750 }
1751
1752 /*
1753  *      Support routine. Sends outgoing frames to any network
1754  *      taps currently in use.
1755  */
1756
1757 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1758 {
1759         struct packet_type *ptype;
1760         struct sk_buff *skb2 = NULL;
1761         struct packet_type *pt_prev = NULL;
1762
1763         rcu_read_lock();
1764         list_for_each_entry_rcu(ptype, &ptype_all, list) {
1765                 /* Never send packets back to the socket
1766                  * they originated from - MvS (miquels@drinkel.ow.org)
1767                  */
1768                 if ((ptype->dev == dev || !ptype->dev) &&
1769                     (!skb_loop_sk(ptype, skb))) {
1770                         if (pt_prev) {
1771                                 deliver_skb(skb2, pt_prev, skb->dev);
1772                                 pt_prev = ptype;
1773                                 continue;
1774                         }
1775
1776                         skb2 = skb_clone(skb, GFP_ATOMIC);
1777                         if (!skb2)
1778                                 break;
1779
1780                         net_timestamp_set(skb2);
1781
1782                         /* skb->nh should be correctly set by the sender,
1783                          * so that the second statement is just protection
1784                          * against buggy protocols.
1785                          */
1786                         skb_reset_mac_header(skb2);
1787
1788                         if (skb_network_header(skb2) < skb2->data ||
1789                             skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1790                                 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1791                                                      ntohs(skb2->protocol),
1792                                                      dev->name);
1793                                 skb_reset_network_header(skb2);
1794                         }
1795
1796                         skb2->transport_header = skb2->network_header;
1797                         skb2->pkt_type = PACKET_OUTGOING;
1798                         pt_prev = ptype;
1799                 }
1800         }
1801         if (pt_prev)
1802                 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1803         rcu_read_unlock();
1804 }
1805
1806 /**
1807  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1808  * @dev: Network device
1809  * @txq: number of queues available
1810  *
1811  * If real_num_tx_queues is changed the tc mappings may no longer be
1812  * valid. To resolve this verify the tc mapping remains valid and if
1813  * not, NULL the mapping. With no priorities mapping to this
1814  * offset/count pair it will no longer be used. In the worst case, if
1815  * TC0 is invalid nothing can be done, so disable priority mappings. It
1816  * is expected that drivers will fix this mapping if they can before
1817  * calling netif_set_real_num_tx_queues.
1818  */
1819 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1820 {
1821         int i;
1822         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1823
1824         /* If TC0 is invalidated disable TC mapping */
1825         if (tc->offset + tc->count > txq) {
1826                 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1827                 dev->num_tc = 0;
1828                 return;
1829         }
1830
1831         /* Invalidated prio-to-tc mappings are set to TC0 */
1832         for (i = 1; i < TC_BITMASK + 1; i++) {
1833                 int q = netdev_get_prio_tc_map(dev, i);
1834
1835                 tc = &dev->tc_to_txq[q];
1836                 if (tc->offset + tc->count > txq) {
1837                         pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1838                                 i, q);
1839                         netdev_set_prio_tc_map(dev, i, 0);
1840                 }
1841         }
1842 }
1843
1844 #ifdef CONFIG_XPS
1845 static DEFINE_MUTEX(xps_map_mutex);
1846 #define xmap_dereference(P)             \
1847         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1848
1849 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1850                                         int cpu, u16 index)
1851 {
1852         struct xps_map *map = NULL;
1853         int pos;
1854
1855         if (dev_maps)
1856                 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1857
1858         for (pos = 0; map && pos < map->len; pos++) {
1859                 if (map->queues[pos] == index) {
1860                         if (map->len > 1) {
1861                                 map->queues[pos] = map->queues[--map->len];
1862                         } else {
1863                                 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1864                                 kfree_rcu(map, rcu);
1865                                 map = NULL;
1866                         }
1867                         break;
1868                 }
1869         }
1870
1871         return map;
1872 }
1873
1874 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1875 {
1876         struct xps_dev_maps *dev_maps;
1877         int cpu, i;
1878         bool active = false;
1879
1880         mutex_lock(&xps_map_mutex);
1881         dev_maps = xmap_dereference(dev->xps_maps);
1882
1883         if (!dev_maps)
1884                 goto out_no_maps;
1885
1886         for_each_possible_cpu(cpu) {
1887                 for (i = index; i < dev->num_tx_queues; i++) {
1888                         if (!remove_xps_queue(dev_maps, cpu, i))
1889                                 break;
1890                 }
1891                 if (i == dev->num_tx_queues)
1892                         active = true;
1893         }
1894
1895         if (!active) {
1896                 RCU_INIT_POINTER(dev->xps_maps, NULL);
1897                 kfree_rcu(dev_maps, rcu);
1898         }
1899
1900         for (i = index; i < dev->num_tx_queues; i++)
1901                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1902                                              NUMA_NO_NODE);
1903
1904 out_no_maps:
1905         mutex_unlock(&xps_map_mutex);
1906 }
1907
1908 static struct xps_map *expand_xps_map(struct xps_map *map,
1909                                       int cpu, u16 index)
1910 {
1911         struct xps_map *new_map;
1912         int alloc_len = XPS_MIN_MAP_ALLOC;
1913         int i, pos;
1914
1915         for (pos = 0; map && pos < map->len; pos++) {
1916                 if (map->queues[pos] != index)
1917                         continue;
1918                 return map;
1919         }
1920
1921         /* Need to add queue to this CPU's existing map */
1922         if (map) {
1923                 if (pos < map->alloc_len)
1924                         return map;
1925
1926                 alloc_len = map->alloc_len * 2;
1927         }
1928
1929         /* Need to allocate a new map to store the queue for this CPU */
1930         new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1931                                cpu_to_node(cpu));
1932         if (!new_map)
1933                 return NULL;
1934
1935         for (i = 0; i < pos; i++)
1936                 new_map->queues[i] = map->queues[i];
1937         new_map->alloc_len = alloc_len;
1938         new_map->len = pos;
1939
1940         return new_map;
1941 }
1942
1943 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1944                         u16 index)
1945 {
1946         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
1947         struct xps_map *map, *new_map;
1948         int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
1949         int cpu, numa_node_id = -2;
1950         bool active = false;
1951
1952         mutex_lock(&xps_map_mutex);
1953
1954         dev_maps = xmap_dereference(dev->xps_maps);
1955
1956         /* allocate memory for queue storage */
1957         for_each_online_cpu(cpu) {
1958                 if (!cpumask_test_cpu(cpu, mask))
1959                         continue;
1960
1961                 if (!new_dev_maps)
1962                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
1963                 if (!new_dev_maps) {
1964                         mutex_unlock(&xps_map_mutex);
1965                         return -ENOMEM;
1966                 }
1967
1968                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1969                                  NULL;
1970
1971                 map = expand_xps_map(map, cpu, index);
1972                 if (!map)
1973                         goto error;
1974
1975                 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1976         }
1977
1978         if (!new_dev_maps)
1979                 goto out_no_new_maps;
1980
1981         for_each_possible_cpu(cpu) {
1982                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1983                         /* add queue to CPU maps */
1984                         int pos = 0;
1985
1986                         map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1987                         while ((pos < map->len) && (map->queues[pos] != index))
1988                                 pos++;
1989
1990                         if (pos == map->len)
1991                                 map->queues[map->len++] = index;
1992 #ifdef CONFIG_NUMA
1993                         if (numa_node_id == -2)
1994                                 numa_node_id = cpu_to_node(cpu);
1995                         else if (numa_node_id != cpu_to_node(cpu))
1996                                 numa_node_id = -1;
1997 #endif
1998                 } else if (dev_maps) {
1999                         /* fill in the new device map from the old device map */
2000                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
2001                         RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2002                 }
2003
2004         }
2005
2006         rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2007
2008         /* Cleanup old maps */
2009         if (dev_maps) {
2010                 for_each_possible_cpu(cpu) {
2011                         new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2012                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
2013                         if (map && map != new_map)
2014                                 kfree_rcu(map, rcu);
2015                 }
2016
2017                 kfree_rcu(dev_maps, rcu);
2018         }
2019
2020         dev_maps = new_dev_maps;
2021         active = true;
2022
2023 out_no_new_maps:
2024         /* update Tx queue numa node */
2025         netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2026                                      (numa_node_id >= 0) ? numa_node_id :
2027                                      NUMA_NO_NODE);
2028
2029         if (!dev_maps)
2030                 goto out_no_maps;
2031
2032         /* removes queue from unused CPUs */
2033         for_each_possible_cpu(cpu) {
2034                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2035                         continue;
2036
2037                 if (remove_xps_queue(dev_maps, cpu, index))
2038                         active = true;
2039         }
2040
2041         /* free map if not active */
2042         if (!active) {
2043                 RCU_INIT_POINTER(dev->xps_maps, NULL);
2044                 kfree_rcu(dev_maps, rcu);
2045         }
2046
2047 out_no_maps:
2048         mutex_unlock(&xps_map_mutex);
2049
2050         return 0;
2051 error:
2052         /* remove any maps that we added */
2053         for_each_possible_cpu(cpu) {
2054                 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2055                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2056                                  NULL;
2057                 if (new_map && new_map != map)
2058                         kfree(new_map);
2059         }
2060
2061         mutex_unlock(&xps_map_mutex);
2062
2063         kfree(new_dev_maps);
2064         return -ENOMEM;
2065 }
2066 EXPORT_SYMBOL(netif_set_xps_queue);
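/*
 * Usage sketch: a multiqueue driver could spread its TX queues over the
 * online CPUs with netif_set_xps_queue(), e.g. from its open/setup path
 * (my_setup_xps is hypothetical).
 */
#if 0
static void my_setup_xps(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->real_num_tx_queues; i++)
                netif_set_xps_queue(dev, cpumask_of(i % num_online_cpus()), i);
}
#endif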
2067
2068 #endif
2069 /*
2070  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2071  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2072  */
2073 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2074 {
2075         int rc;
2076
2077         if (txq < 1 || txq > dev->num_tx_queues)
2078                 return -EINVAL;
2079
2080         if (dev->reg_state == NETREG_REGISTERED ||
2081             dev->reg_state == NETREG_UNREGISTERING) {
2082                 ASSERT_RTNL();
2083
2084                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2085                                                   txq);
2086                 if (rc)
2087                         return rc;
2088
2089                 if (dev->num_tc)
2090                         netif_setup_tc(dev, txq);
2091
2092                 if (txq < dev->real_num_tx_queues) {
2093                         qdisc_reset_all_tx_gt(dev, txq);
2094 #ifdef CONFIG_XPS
2095                         netif_reset_xps_queues_gt(dev, txq);
2096 #endif
2097                 }
2098         }
2099
2100         dev->real_num_tx_queues = txq;
2101         return 0;
2102 }
2103 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
2104
2105 #ifdef CONFIG_SYSFS
2106 /**
2107  *      netif_set_real_num_rx_queues - set actual number of RX queues used
2108  *      @dev: Network device
2109  *      @rxq: Actual number of RX queues
2110  *
2111  *      This must be called either with the rtnl_lock held or before
2112  *      registration of the net device.  Returns 0 on success, or a
2113  *      negative error code.  If called before registration, it always
2114  *      succeeds.
2115  */
2116 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2117 {
2118         int rc;
2119
2120         if (rxq < 1 || rxq > dev->num_rx_queues)
2121                 return -EINVAL;
2122
2123         if (dev->reg_state == NETREG_REGISTERED) {
2124                 ASSERT_RTNL();
2125
2126                 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2127                                                   rxq);
2128                 if (rc)
2129                         return rc;
2130         }
2131
2132         dev->real_num_rx_queues = rxq;
2133         return 0;
2134 }
2135 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2136 #endif
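/*
 * Usage sketch: a driver resizing its channel count (for example from an
 * ethtool set_channels handler, here called my_set_channels) would adjust
 * both counts under the rtnl lock.
 */
#if 0
static int my_set_channels(struct net_device *dev, unsigned int count)
{
        int err;

        ASSERT_RTNL();

        err = netif_set_real_num_tx_queues(dev, count);
        if (err)
                return err;

        return netif_set_real_num_rx_queues(dev, count);
}
#endif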
2137
2138 /**
2139  * netif_get_num_default_rss_queues - default number of RSS queues
2140  *
2141  * This routine should set an upper limit on the number of RSS queues
2142  * used by default by multiqueue devices.
2143  */
2144 int netif_get_num_default_rss_queues(void)
2145 {
2146         return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2147 }
2148 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
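/*
 * Usage sketch: a driver would typically clamp its hardware queue count
 * with this helper at probe time (MY_HW_MAX_QUEUES is hypothetical).
 */
#if 0
static unsigned int my_pick_queue_count(void)
{
        return min_t(unsigned int, MY_HW_MAX_QUEUES,
                     netif_get_num_default_rss_queues());
}
#endif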
2149
2150 static inline void __netif_reschedule(struct Qdisc *q)
2151 {
2152         struct softnet_data *sd;
2153         unsigned long flags;
2154
2155         local_irq_save(flags);
2156         sd = this_cpu_ptr(&softnet_data);
2157         q->next_sched = NULL;
2158         *sd->output_queue_tailp = q;
2159         sd->output_queue_tailp = &q->next_sched;
2160         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2161         local_irq_restore(flags);
2162 }
2163
2164 void __netif_schedule(struct Qdisc *q)
2165 {
2166         if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2167                 __netif_reschedule(q);
2168 }
2169 EXPORT_SYMBOL(__netif_schedule);
2170
2171 struct dev_kfree_skb_cb {
2172         enum skb_free_reason reason;
2173 };
2174
2175 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2176 {
2177         return (struct dev_kfree_skb_cb *)skb->cb;
2178 }
2179
2180 void netif_schedule_queue(struct netdev_queue *txq)
2181 {
2182         rcu_read_lock();
2183         if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2184                 struct Qdisc *q = rcu_dereference(txq->qdisc);
2185
2186                 __netif_schedule(q);
2187         }
2188         rcu_read_unlock();
2189 }
2190 EXPORT_SYMBOL(netif_schedule_queue);
2191
2192 /**
2193  *      netif_wake_subqueue - allow sending packets on subqueue
2194  *      @dev: network device
2195  *      @queue_index: sub queue index
2196  *
2197  * Resume individual transmit queue of a device with multiple transmit queues.
2198  */
2199 void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2200 {
2201         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2202
2203         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2204                 struct Qdisc *q;
2205
2206                 rcu_read_lock();
2207                 q = rcu_dereference(txq->qdisc);
2208                 __netif_schedule(q);
2209                 rcu_read_unlock();
2210         }
2211 }
2212 EXPORT_SYMBOL(netif_wake_subqueue);
2213
2214 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2215 {
2216         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2217                 struct Qdisc *q;
2218
2219                 rcu_read_lock();
2220                 q = rcu_dereference(dev_queue->qdisc);
2221                 __netif_schedule(q);
2222                 rcu_read_unlock();
2223         }
2224 }
2225 EXPORT_SYMBOL(netif_tx_wake_queue);
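/*
 * Usage sketch: a TX completion handler usually wakes a queue it stopped
 * earlier once enough descriptors have been reclaimed (my_ring_space and
 * MY_WAKE_THRESHOLD are hypothetical).
 */
#if 0
static void my_tx_clean(struct net_device *dev, u16 qidx)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, qidx);

        /* ... reclaim completed descriptors ... */

        if (netif_tx_queue_stopped(txq) &&
            my_ring_space(dev, qidx) > MY_WAKE_THRESHOLD)
                netif_tx_wake_queue(txq);
}
#endif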
2226
2227 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2228 {
2229         unsigned long flags;
2230
2231         if (likely(atomic_read(&skb->users) == 1)) {
2232                 smp_rmb();
2233                 atomic_set(&skb->users, 0);
2234         } else if (likely(!atomic_dec_and_test(&skb->users))) {
2235                 return;
2236         }
2237         get_kfree_skb_cb(skb)->reason = reason;
2238         local_irq_save(flags);
2239         skb->next = __this_cpu_read(softnet_data.completion_queue);
2240         __this_cpu_write(softnet_data.completion_queue, skb);
2241         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2242         local_irq_restore(flags);
2243 }
2244 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2245
2246 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2247 {
2248         if (in_irq() || irqs_disabled())
2249                 __dev_kfree_skb_irq(skb, reason);
2250         else
2251                 dev_kfree_skb(skb);
2252 }
2253 EXPORT_SYMBOL(__dev_kfree_skb_any);
2254
2255
2256 /**
2257  * netif_device_detach - mark device as removed
2258  * @dev: network device
2259  *
2260  * Mark the device as removed from the system and therefore no longer available.
2261  */
2262 void netif_device_detach(struct net_device *dev)
2263 {
2264         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2265             netif_running(dev)) {
2266                 netif_tx_stop_all_queues(dev);
2267         }
2268 }
2269 EXPORT_SYMBOL(netif_device_detach);
2270
2271 /**
2272  * netif_device_attach - mark device as attached
2273  * @dev: network device
2274  *
2275  * Mark the device as attached to the system and restart it if needed.
2276  */
2277 void netif_device_attach(struct net_device *dev)
2278 {
2279         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2280             netif_running(dev)) {
2281                 netif_tx_wake_all_queues(dev);
2282                 __netdev_watchdog_up(dev);
2283         }
2284 }
2285 EXPORT_SYMBOL(netif_device_attach);
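/*
 * Usage sketch: suspend and resume callbacks commonly bracket the hardware
 * shutdown with detach/attach so the stack stops feeding the device while
 * it is away (the my_* callbacks are hypothetical).
 */
#if 0
static int my_suspend(struct device *d)
{
        struct net_device *dev = dev_get_drvdata(d);

        netif_device_detach(dev);
        /* ... quiesce and power down the hardware ... */
        return 0;
}

static int my_resume(struct device *d)
{
        struct net_device *dev = dev_get_drvdata(d);

        /* ... power up and reprogram the hardware ... */
        netif_device_attach(dev);
        return 0;
}
#endif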
2286
2287 static void skb_warn_bad_offload(const struct sk_buff *skb)
2288 {
2289         static const netdev_features_t null_features = 0;
2290         struct net_device *dev = skb->dev;
2291         const char *driver = "";
2292
2293         if (!net_ratelimit())
2294                 return;
2295
2296         if (dev && dev->dev.parent)
2297                 driver = dev_driver_string(dev->dev.parent);
2298
2299         WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2300              "gso_type=%d ip_summed=%d\n",
2301              driver, dev ? &dev->features : &null_features,
2302              skb->sk ? &skb->sk->sk_route_caps : &null_features,
2303              skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2304              skb_shinfo(skb)->gso_type, skb->ip_summed);
2305 }
2306
2307 /*
2308  * Invalidate hardware checksum when packet is to be mangled, and
2309  * complete checksum manually on outgoing path.
2310  */
2311 int skb_checksum_help(struct sk_buff *skb)
2312 {
2313         __wsum csum;
2314         int ret = 0, offset;
2315
2316         if (skb->ip_summed == CHECKSUM_COMPLETE)
2317                 goto out_set_summed;
2318
2319         if (unlikely(skb_shinfo(skb)->gso_size)) {
2320                 skb_warn_bad_offload(skb);
2321                 return -EINVAL;
2322         }
2323
2324         /* Before computing a checksum, we should make sure no frag could
2325          * be modified by an external entity: the checksum could be wrong.
2326          */
2327         if (skb_has_shared_frag(skb)) {
2328                 ret = __skb_linearize(skb);
2329                 if (ret)
2330                         goto out;
2331         }
2332
2333         offset = skb_checksum_start_offset(skb);
2334         BUG_ON(offset >= skb_headlen(skb));
2335         csum = skb_checksum(skb, offset, skb->len - offset, 0);
2336
2337         offset += skb->csum_offset;
2338         BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2339
2340         if (skb_cloned(skb) &&
2341             !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2342                 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2343                 if (ret)
2344                         goto out;
2345         }
2346
2347         *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2348 out_set_summed:
2349         skb->ip_summed = CHECKSUM_NONE;
2350 out:
2351         return ret;
2352 }
2353 EXPORT_SYMBOL(skb_checksum_help);
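/*
 * Usage sketch: a driver whose hardware cannot checksum a particular
 * packet can fall back to skb_checksum_help() before handing the skb to
 * the NIC (my_hw_can_csum is hypothetical).
 */
#if 0
static int my_prep_csum(struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL && !my_hw_can_csum(skb))
                return skb_checksum_help(skb);
        return 0;
}
#endif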
2354
2355 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2356 {
2357         unsigned int vlan_depth = skb->mac_len;
2358         __be16 type = skb->protocol;
2359
2360         /* Tunnel gso handlers can set protocol to ethernet. */
2361         if (type == htons(ETH_P_TEB)) {
2362                 struct ethhdr *eth;
2363
2364                 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2365                         return 0;
2366
2367                 eth = (struct ethhdr *)skb_mac_header(skb);
2368                 type = eth->h_proto;
2369         }
2370
2371         /* if skb->protocol is 802.1Q/AD then the header should already be
2372          * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
2373          * ETH_HLEN otherwise
2374          */
2375         if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2376                 if (vlan_depth) {
2377                         if (WARN_ON(vlan_depth < VLAN_HLEN))
2378                                 return 0;
2379                         vlan_depth -= VLAN_HLEN;
2380                 } else {
2381                         vlan_depth = ETH_HLEN;
2382                 }
2383                 do {
2384                         struct vlan_hdr *vh;
2385
2386                         if (unlikely(!pskb_may_pull(skb,
2387                                                     vlan_depth + VLAN_HLEN)))
2388                                 return 0;
2389
2390                         vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2391                         type = vh->h_vlan_encapsulated_proto;
2392                         vlan_depth += VLAN_HLEN;
2393                 } while (type == htons(ETH_P_8021Q) ||
2394                          type == htons(ETH_P_8021AD));
2395         }
2396
2397         *depth = vlan_depth;
2398
2399         return type;
2400 }
2401
2402 /**
2403  *      skb_mac_gso_segment - mac layer segmentation handler.
2404  *      @skb: buffer to segment
2405  *      @features: features for the output path (see dev->features)
2406  */
2407 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2408                                     netdev_features_t features)
2409 {
2410         struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2411         struct packet_offload *ptype;
2412         int vlan_depth = skb->mac_len;
2413         __be16 type = skb_network_protocol(skb, &vlan_depth);
2414
2415         if (unlikely(!type))
2416                 return ERR_PTR(-EINVAL);
2417
2418         __skb_pull(skb, vlan_depth);
2419
2420         rcu_read_lock();
2421         list_for_each_entry_rcu(ptype, &offload_base, list) {
2422                 if (ptype->type == type && ptype->callbacks.gso_segment) {
2423                         segs = ptype->callbacks.gso_segment(skb, features);
2424                         break;
2425                 }
2426         }
2427         rcu_read_unlock();
2428
2429         __skb_push(skb, skb->data - skb_mac_header(skb));
2430
2431         return segs;
2432 }
2433 EXPORT_SYMBOL(skb_mac_gso_segment);
2434
2435
2436 /* openvswitch calls this on the rx path, so we need a different check.
2437  */
2438 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2439 {
2440         if (tx_path)
2441                 return skb->ip_summed != CHECKSUM_PARTIAL;
2442         else
2443                 return skb->ip_summed == CHECKSUM_NONE;
2444 }
2445
2446 /**
2447  *      __skb_gso_segment - Perform segmentation on skb.
2448  *      @skb: buffer to segment
2449  *      @features: features for the output path (see dev->features)
2450  *      @tx_path: whether it is called in TX path
2451  *
2452  *      This function segments the given skb and returns a list of segments.
2453  *
2454  *      It may return NULL if the skb requires no segmentation.  This is
2455  *      only possible when GSO is used for verifying header integrity.
2456  */
2457 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2458                                   netdev_features_t features, bool tx_path)
2459 {
2460         if (unlikely(skb_needs_check(skb, tx_path))) {
2461                 int err;
2462
2463                 skb_warn_bad_offload(skb);
2464
2465                 err = skb_cow_head(skb, 0);
2466                 if (err < 0)
2467                         return ERR_PTR(err);
2468         }
2469
2470         SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2471         SKB_GSO_CB(skb)->encap_level = 0;
2472
2473         skb_reset_mac_header(skb);
2474         skb_reset_mac_len(skb);
2475
2476         return skb_mac_gso_segment(skb, features);
2477 }
2478 EXPORT_SYMBOL(__skb_gso_segment);
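/*
 * Usage sketch: a driver doing software GSO would call skb_gso_segment()
 * (the tx-path wrapper around __skb_gso_segment) and transmit the
 * resulting list one segment at a time (my_xmit_one is hypothetical).
 */
#if 0
static int my_sw_gso(struct sk_buff *skb, struct net_device *dev)
{
        struct sk_buff *segs, *next;

        segs = skb_gso_segment(skb, dev->features & ~NETIF_F_GSO_MASK);
        if (IS_ERR(segs))
                return PTR_ERR(segs);
        if (!segs)
                return my_xmit_one(skb, dev);

        consume_skb(skb);
        while (segs) {
                next = segs->next;
                segs->next = NULL;
                my_xmit_one(segs, dev);
                segs = next;
        }
        return 0;
}
#endif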
2479
2480 /* Take action when hardware reception checksum errors are detected. */
2481 #ifdef CONFIG_BUG
2482 void netdev_rx_csum_fault(struct net_device *dev)
2483 {
2484         if (net_ratelimit()) {
2485                 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2486                 dump_stack();
2487         }
2488 }
2489 EXPORT_SYMBOL(netdev_rx_csum_fault);
2490 #endif
2491
2492 /* Actually, we should eliminate this check as soon as we know that:
2493  * 1. An IOMMU is present and allows mapping all the memory.
2494  * 2. No high memory really exists on this machine.
2495  */
2496
2497 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2498 {
2499 #ifdef CONFIG_HIGHMEM
2500         int i;
2501         if (!(dev->features & NETIF_F_HIGHDMA)) {
2502                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2503                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2504                         if (PageHighMem(skb_frag_page(frag)))
2505                                 return 1;
2506                 }
2507         }
2508
2509         if (PCI_DMA_BUS_IS_PHYS) {
2510                 struct device *pdev = dev->dev.parent;
2511
2512                 if (!pdev)
2513                         return 0;
2514                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2515                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2516                         dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2517                         if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2518                                 return 1;
2519                 }
2520         }
2521 #endif
2522         return 0;
2523 }
2524
2525 /* If this is an MPLS offload request, verify we are testing hardware MPLS
2526  * features instead of the standard features for the netdev.
2527  */
2528 #ifdef CONFIG_NET_MPLS_GSO
2529 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2530                                            netdev_features_t features,
2531                                            __be16 type)
2532 {
2533         if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
2534                 features &= skb->dev->mpls_features;
2535
2536         return features;
2537 }
2538 #else
2539 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2540                                            netdev_features_t features,
2541                                            __be16 type)
2542 {
2543         return features;
2544 }
2545 #endif
2546
2547 static netdev_features_t harmonize_features(struct sk_buff *skb,
2548         netdev_features_t features)
2549 {
2550         int tmp;
2551         __be16 type;
2552
2553         type = skb_network_protocol(skb, &tmp);
2554         features = net_mpls_features(skb, features, type);
2555
2556         if (skb->ip_summed != CHECKSUM_NONE &&
2557             !can_checksum_protocol(features, type)) {
2558                 features &= ~NETIF_F_ALL_CSUM;
2559         } else if (illegal_highdma(skb->dev, skb)) {
2560                 features &= ~NETIF_F_SG;
2561         }
2562
2563         return features;
2564 }
2565
2566 netdev_features_t netif_skb_features(struct sk_buff *skb)
2567 {
2568         const struct net_device *dev = skb->dev;
2569         netdev_features_t features = dev->features;
2570         u16 gso_segs = skb_shinfo(skb)->gso_segs;
2571         __be16 protocol = skb->protocol;
2572
2573         if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2574                 features &= ~NETIF_F_GSO_MASK;
2575
2576         if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
2577                 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2578                 protocol = veh->h_vlan_encapsulated_proto;
2579         } else if (!vlan_tx_tag_present(skb)) {
2580                 return harmonize_features(skb, features);
2581         }
2582
2583         features = netdev_intersect_features(features,
2584                                              dev->vlan_features |
2585                                              NETIF_F_HW_VLAN_CTAG_TX |
2586                                              NETIF_F_HW_VLAN_STAG_TX);
2587
2588         if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
2589                 features = netdev_intersect_features(features,
2590                                                      NETIF_F_SG |
2591                                                      NETIF_F_HIGHDMA |
2592                                                      NETIF_F_FRAGLIST |
2593                                                      NETIF_F_GEN_CSUM |
2594                                                      NETIF_F_HW_VLAN_CTAG_TX |
2595                                                      NETIF_F_HW_VLAN_STAG_TX);
2596
2597         return harmonize_features(skb, features);
2598 }
2599 EXPORT_SYMBOL(netif_skb_features);
2600
2601 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2602                     struct netdev_queue *txq, bool more)
2603 {
2604         unsigned int len;
2605         int rc;
2606
2607         if (!list_empty(&ptype_all))
2608                 dev_queue_xmit_nit(skb, dev);
2609
2610         len = skb->len;
2611         trace_net_dev_start_xmit(skb, dev);
2612         rc = netdev_start_xmit(skb, dev, txq, more);
2613         trace_net_dev_xmit(skb, rc, dev, len);
2614
2615         return rc;
2616 }
2617
2618 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2619                                     struct netdev_queue *txq, int *ret)
2620 {
2621         struct sk_buff *skb = first;
2622         int rc = NETDEV_TX_OK;
2623
2624         while (skb) {
2625                 struct sk_buff *next = skb->next;
2626
2627                 skb->next = NULL;
2628                 rc = xmit_one(skb, dev, txq, next != NULL);
2629                 if (unlikely(!dev_xmit_complete(rc))) {
2630                         skb->next = next;
2631                         goto out;
2632                 }
2633
2634                 skb = next;
2635                 if (netif_xmit_stopped(txq) && skb) {
2636                         rc = NETDEV_TX_BUSY;
2637                         break;
2638                 }
2639         }
2640
2641 out:
2642         *ret = rc;
2643         return skb;
2644 }
2645
2646 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2647                                           netdev_features_t features)
2648 {
2649         if (vlan_tx_tag_present(skb) &&
2650             !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2651                 skb = __vlan_put_tag(skb, skb->vlan_proto,
2652                                      vlan_tx_tag_get(skb));
2653                 if (skb)
2654                         skb->vlan_tci = 0;
2655         }
2656         return skb;
2657 }
2658
2659 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2660 {
2661         netdev_features_t features;
2662
2663         if (skb->next)
2664                 return skb;
2665
2666         features = netif_skb_features(skb);
2667         skb = validate_xmit_vlan(skb, features);
2668         if (unlikely(!skb))
2669                 goto out_null;
2670
2671         /* If this is an encapsulation offload request, verify we are
2672          * testing hardware encapsulation features instead of the
2673          * standard features for the netdev.
2674          */
2675         if (skb->encapsulation)
2676                 features &= dev->hw_enc_features;
2677
2678         if (netif_needs_gso(dev, skb, features)) {
2679                 struct sk_buff *segs;
2680
2681                 segs = skb_gso_segment(skb, features);
2682                 if (IS_ERR(segs)) {
2683                         segs = NULL;
2684                 } else if (segs) {
2685                         consume_skb(skb);
2686                         skb = segs;
2687                 }
2688         } else {
2689                 if (skb_needs_linearize(skb, features) &&
2690                     __skb_linearize(skb))
2691                         goto out_kfree_skb;
2692
2693                 /* If packet is not checksummed and device does not
2694                  * support checksumming for this protocol, complete
2695                  * checksumming here.
2696                  */
2697                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2698                         if (skb->encapsulation)
2699                                 skb_set_inner_transport_header(skb,
2700                                                                skb_checksum_start_offset(skb));
2701                         else
2702                                 skb_set_transport_header(skb,
2703                                                          skb_checksum_start_offset(skb));
2704                         if (!(features & NETIF_F_ALL_CSUM) &&
2705                             skb_checksum_help(skb))
2706                                 goto out_kfree_skb;
2707                 }
2708         }
2709
2710         return skb;
2711
2712 out_kfree_skb:
2713         kfree_skb(skb);
2714 out_null:
2715         return NULL;
2716 }
2717
2718 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2719 {
2720         struct sk_buff *next, *head = NULL, *tail;
2721
2722         for (; skb != NULL; skb = next) {
2723                 next = skb->next;
2724                 skb->next = NULL;
2725
2726                 /* in case the skb won't be segmented, point it to itself */
2727                 skb->prev = skb;
2728
2729                 skb = validate_xmit_skb(skb, dev);
2730                 if (!skb)
2731                         continue;
2732
2733                 if (!head)
2734                         head = skb;
2735                 else
2736                         tail->next = skb;
2737                 /* If skb was segmented, skb->prev points to
2738                  * the last segment. If not, it still contains skb.
2739                  */
2740                 tail = skb->prev;
2741         }
2742         return head;
2743 }
2744
2745 static void qdisc_pkt_len_init(struct sk_buff *skb)
2746 {
2747         const struct skb_shared_info *shinfo = skb_shinfo(skb);
2748
2749         qdisc_skb_cb(skb)->pkt_len = skb->len;
2750
2751         /* To get a more precise estimation of bytes sent on the wire,
2752          * we add the header size of all segments to pkt_len
2753          */
2754         if (shinfo->gso_size)  {
2755                 unsigned int hdr_len;
2756                 u16 gso_segs = shinfo->gso_segs;
2757
2758                 /* mac layer + network layer */
2759                 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2760
2761                 /* + transport layer */
2762                 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2763                         hdr_len += tcp_hdrlen(skb);
2764                 else
2765                         hdr_len += sizeof(struct udphdr);
2766
2767                 if (shinfo->gso_type & SKB_GSO_DODGY)
2768                         gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2769                                                 shinfo->gso_size);
2770
2771                 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
2772         }
2773 }
2774
2775 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2776                                  struct net_device *dev,
2777                                  struct netdev_queue *txq)
2778 {
2779         spinlock_t *root_lock = qdisc_lock(q);
2780         bool contended;
2781         int rc;
2782
2783         qdisc_pkt_len_init(skb);
2784         qdisc_calculate_pkt_len(skb, q);
2785         /*
2786          * Heuristic to force contended enqueues to serialize on a
2787          * separate lock before trying to get the qdisc main lock.
2788          * This permits the __QDISC___STATE_RUNNING owner to get the lock
2789          * more often and dequeue packets faster.
2790          */
2791         contended = qdisc_is_running(q);
2792         if (unlikely(contended))
2793                 spin_lock(&q->busylock);
2794
2795         spin_lock(root_lock);
2796         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2797                 kfree_skb(skb);
2798                 rc = NET_XMIT_DROP;
2799         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2800                    qdisc_run_begin(q)) {
2801                 /*
2802                  * This is a work-conserving queue; there are no old skbs
2803                  * waiting to be sent out; and the qdisc is not running -
2804                  * xmit the skb directly.
2805                  */
2806
2807                 qdisc_bstats_update(q, skb);
2808
2809                 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
2810                         if (unlikely(contended)) {
2811                                 spin_unlock(&q->busylock);
2812                                 contended = false;
2813                         }
2814                         __qdisc_run(q);
2815                 } else
2816                         qdisc_run_end(q);
2817
2818                 rc = NET_XMIT_SUCCESS;
2819         } else {
2820                 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2821                 if (qdisc_run_begin(q)) {
2822                         if (unlikely(contended)) {
2823                                 spin_unlock(&q->busylock);
2824                                 contended = false;
2825                         }
2826                         __qdisc_run(q);
2827                 }
2828         }
2829         spin_unlock(root_lock);
2830         if (unlikely(contended))
2831                 spin_unlock(&q->busylock);
2832         return rc;
2833 }
2834
2835 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2836 static void skb_update_prio(struct sk_buff *skb)
2837 {
2838         struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2839
2840         if (!skb->priority && skb->sk && map) {
2841                 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2842
2843                 if (prioidx < map->priomap_len)
2844                         skb->priority = map->priomap[prioidx];
2845         }
2846 }
2847 #else
2848 #define skb_update_prio(skb)
2849 #endif
2850
2851 static DEFINE_PER_CPU(int, xmit_recursion);
2852 #define RECURSION_LIMIT 10
2853
2854 /**
2855  *      dev_loopback_xmit - loop back @skb
2856  *      @skb: buffer to transmit
2857  */
2858 int dev_loopback_xmit(struct sk_buff *skb)
2859 {
2860         skb_reset_mac_header(skb);
2861         __skb_pull(skb, skb_network_offset(skb));
2862         skb->pkt_type = PACKET_LOOPBACK;
2863         skb->ip_summed = CHECKSUM_UNNECESSARY;
2864         WARN_ON(!skb_dst(skb));
2865         skb_dst_force(skb);
2866         netif_rx_ni(skb);
2867         return 0;
2868 }
2869 EXPORT_SYMBOL(dev_loopback_xmit);
2870
2871 /**
2872  *      __dev_queue_xmit - transmit a buffer
2873  *      @skb: buffer to transmit
2874  *      @accel_priv: private data used for L2 forwarding offload
2875  *
2876  *      Queue a buffer for transmission to a network device. The caller must
2877  *      have set the device and priority and built the buffer before calling
2878  *      this function. The function can be called from an interrupt.
2879  *
2880  *      A negative errno code is returned on a failure. A success does not
2881  *      guarantee the frame will be transmitted as it may be dropped due
2882  *      to congestion or traffic shaping.
2883  *
2884  * -----------------------------------------------------------------------------------
2885  *      I notice this method can also return errors from the queue disciplines,
2886  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2887  *      be positive.
2888  *
2889  *      Regardless of the return value, the skb is consumed, so it is currently
2890  *      difficult to retry a send to this method.  (You can bump the ref count
2891  *      before sending to hold a reference for retry if you are careful.)
2892  *
2893  *      When calling this method, interrupts MUST be enabled.  This is because
2894  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2895  *          --BLG
2896  */
2897 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2898 {
2899         struct net_device *dev = skb->dev;
2900         struct netdev_queue *txq;
2901         struct Qdisc *q;
2902         int rc = -ENOMEM;
2903
2904         skb_reset_mac_header(skb);
2905
2906         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2907                 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2908
2909         /* Disable soft irqs for various locks below. Also
2910          * stops preemption for RCU.
2911          */
2912         rcu_read_lock_bh();
2913
2914         skb_update_prio(skb);
2915
2916         /* If device/qdisc don't need skb->dst, release it right now while
2917          * it's hot in this cpu's cache.
2918          */
2919         if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2920                 skb_dst_drop(skb);
2921         else
2922                 skb_dst_force(skb);
2923
2924         txq = netdev_pick_tx(dev, skb, accel_priv);
2925         q = rcu_dereference_bh(txq->qdisc);
2926
2927 #ifdef CONFIG_NET_CLS_ACT
2928         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2929 #endif
2930         trace_net_dev_queue(skb);
2931         if (q->enqueue) {
2932                 rc = __dev_xmit_skb(skb, q, dev, txq);
2933                 goto out;
2934         }
2935
2936         /* The device has no queue. Common case for software devices:
2937            loopback, all sorts of tunnels...
2938
2939            Really, it is unlikely that netif_tx_lock protection is necessary
2940            here.  (f.e. loopback and IP tunnels are clean ignoring statistics
2941            counters.)
2942            However, it is possible that they rely on the protection
2943            made by us here.
2944
2945            Check this and take the lock. It is not prone to deadlocks.
2946            Or take the noqueue qdisc path, which is even simpler 8)
2947          */
2948         if (dev->flags & IFF_UP) {
2949                 int cpu = smp_processor_id(); /* ok because BHs are off */
2950
2951                 if (txq->xmit_lock_owner != cpu) {
2952
2953                         if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2954                                 goto recursion_alert;
2955
2956                         skb = validate_xmit_skb(skb, dev);
2957                         if (!skb)
2958                                 goto drop;
2959
2960                         HARD_TX_LOCK(dev, txq, cpu);
2961
2962                         if (!netif_xmit_stopped(txq)) {
2963                                 __this_cpu_inc(xmit_recursion);
2964                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
2965                                 __this_cpu_dec(xmit_recursion);
2966                                 if (dev_xmit_complete(rc)) {
2967                                         HARD_TX_UNLOCK(dev, txq);
2968                                         goto out;
2969                                 }
2970                         }
2971                         HARD_TX_UNLOCK(dev, txq);
2972                         net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2973                                              dev->name);
2974                 } else {
2975                         /* Recursion is detected! It is possible,
2976                          * unfortunately
2977                          */
2978 recursion_alert:
2979                         net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2980                                              dev->name);
2981                 }
2982         }
2983
2984         rc = -ENETDOWN;
2985 drop:
2986         rcu_read_unlock_bh();
2987
2988         atomic_long_inc(&dev->tx_dropped);
2989         kfree_skb_list(skb);
2990         return rc;
2991 out:
2992         rcu_read_unlock_bh();
2993         return rc;
2994 }
2995
2996 int dev_queue_xmit(struct sk_buff *skb)
2997 {
2998         return __dev_queue_xmit(skb, NULL);
2999 }
3000 EXPORT_SYMBOL(dev_queue_xmit);
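/*
 * Usage sketch: a kernel module that already has a complete layer-2 frame
 * could queue it on a device roughly as follows (my_send_raw_frame is
 * hypothetical; the buffer is assumed to start with the Ethernet header).
 */
#if 0
static int my_send_raw_frame(struct net_device *dev, const void *frame,
                             size_t len)
{
        struct sk_buff *skb;

        skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, LL_RESERVED_SPACE(dev));
        memcpy(skb_put(skb, len), frame, len);

        skb->dev = dev;
        skb_reset_mac_header(skb);
        skb->protocol = eth_hdr(skb)->h_proto;

        return dev_queue_xmit(skb);
}
#endif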
3001
3002 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3003 {
3004         return __dev_queue_xmit(skb, accel_priv);
3005 }
3006 EXPORT_SYMBOL(dev_queue_xmit_accel);
3007
3008
3009 /*=======================================================================
3010                         Receiver routines
3011   =======================================================================*/
3012
3013 int netdev_max_backlog __read_mostly = 1000;
3014 EXPORT_SYMBOL(netdev_max_backlog);
3015
3016 int netdev_tstamp_prequeue __read_mostly = 1;
3017 int netdev_budget __read_mostly = 300;
3018 int weight_p __read_mostly = 64;            /* old backlog weight */
3019
3020 /* Called with irq disabled */
3021 static inline void ____napi_schedule(struct softnet_data *sd,
3022                                      struct napi_struct *napi)
3023 {
3024         list_add_tail(&napi->poll_list, &sd->poll_list);
3025         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3026 }
3027
3028 #ifdef CONFIG_RPS
3029
3030 /* One global table that all flow-based protocols share. */
3031 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3032 EXPORT_SYMBOL(rps_sock_flow_table);
3033
3034 struct static_key rps_needed __read_mostly;
3035
3036 static struct rps_dev_flow *
3037 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3038             struct rps_dev_flow *rflow, u16 next_cpu)
3039 {
3040         if (next_cpu != RPS_NO_CPU) {
3041 #ifdef CONFIG_RFS_ACCEL
3042                 struct netdev_rx_queue *rxqueue;
3043                 struct rps_dev_flow_table *flow_table;
3044                 struct rps_dev_flow *old_rflow;
3045                 u32 flow_id;
3046                 u16 rxq_index;
3047                 int rc;
3048
3049                 /* Should we steer this flow to a different hardware queue? */
3050                 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3051                     !(dev->features & NETIF_F_NTUPLE))
3052                         goto out;
3053                 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3054                 if (rxq_index == skb_get_rx_queue(skb))
3055                         goto out;
3056
3057                 rxqueue = dev->_rx + rxq_index;
3058                 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3059                 if (!flow_table)
3060                         goto out;
3061                 flow_id = skb_get_hash(skb) & flow_table->mask;
3062                 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3063                                                         rxq_index, flow_id);
3064                 if (rc < 0)
3065                         goto out;
3066                 old_rflow = rflow;
3067                 rflow = &flow_table->flows[flow_id];
3068                 rflow->filter = rc;
3069                 if (old_rflow->filter == rflow->filter)
3070                         old_rflow->filter = RPS_NO_FILTER;
3071         out:
3072 #endif
3073                 rflow->last_qtail =
3074                         per_cpu(softnet_data, next_cpu).input_queue_head;
3075         }
3076
3077         rflow->cpu = next_cpu;
3078         return rflow;
3079 }
3080
3081 /*
3082  * get_rps_cpu is called from netif_receive_skb and returns the target
3083  * CPU from the RPS map of the receiving queue for a given skb.
3084  * rcu_read_lock must be held on entry.
3085  */
3086 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3087                        struct rps_dev_flow **rflowp)
3088 {
3089         struct netdev_rx_queue *rxqueue;
3090         struct rps_map *map;
3091         struct rps_dev_flow_table *flow_table;
3092         struct rps_sock_flow_table *sock_flow_table;
3093         int cpu = -1;
3094         u16 tcpu;
3095         u32 hash;
3096
3097         if (skb_rx_queue_recorded(skb)) {
3098                 u16 index = skb_get_rx_queue(skb);
3099                 if (unlikely(index >= dev->real_num_rx_queues)) {
3100                         WARN_ONCE(dev->real_num_rx_queues > 1,
3101                                   "%s received packet on queue %u, but number "
3102                                   "of RX queues is %u\n",
3103                                   dev->name, index, dev->real_num_rx_queues);
3104                         goto done;
3105                 }
3106                 rxqueue = dev->_rx + index;
3107         } else
3108                 rxqueue = dev->_rx;
3109
3110         map = rcu_dereference(rxqueue->rps_map);
3111         if (map) {
3112                 if (map->len == 1 &&
3113                     !rcu_access_pointer(rxqueue->rps_flow_table)) {
3114                         tcpu = map->cpus[0];
3115                         if (cpu_online(tcpu))
3116                                 cpu = tcpu;
3117                         goto done;
3118                 }
3119         } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
3120                 goto done;
3121         }
3122
3123         skb_reset_network_header(skb);
3124         hash = skb_get_hash(skb);
3125         if (!hash)
3126                 goto done;
3127
3128         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3129         sock_flow_table = rcu_dereference(rps_sock_flow_table);
3130         if (flow_table && sock_flow_table) {
3131                 u16 next_cpu;
3132                 struct rps_dev_flow *rflow;
3133
3134                 rflow = &flow_table->flows[hash & flow_table->mask];
3135                 tcpu = rflow->cpu;
3136
3137                 next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];
3138
3139                 /*
3140                  * If the desired CPU (where last recvmsg was done) is
3141                  * different from current CPU (one in the rx-queue flow
3142                  * table entry), switch if one of the following holds:
3143                  *   - Current CPU is unset (equal to RPS_NO_CPU).
3144                  *   - Current CPU is offline.
3145                  *   - The current CPU's queue tail has advanced beyond the
3146                  *     last packet that was enqueued using this table entry.
3147                  *     This guarantees that all previous packets for the flow
3148                  *     have been dequeued, thus preserving in-order delivery.
3149                  */
3150                 if (unlikely(tcpu != next_cpu) &&
3151                     (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3152                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3153                       rflow->last_qtail)) >= 0)) {
3154                         tcpu = next_cpu;
3155                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3156                 }
3157
3158                 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3159                         *rflowp = rflow;
3160                         cpu = tcpu;
3161                         goto done;
3162                 }
3163         }
3164
3165         if (map) {
3166                 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3167                 if (cpu_online(tcpu)) {
3168                         cpu = tcpu;
3169                         goto done;
3170                 }
3171         }
3172
3173 done:
3174         return cpu;
3175 }
3176
3177 #ifdef CONFIG_RFS_ACCEL
3178
3179 /**
3180  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3181  * @dev: Device on which the filter was set
3182  * @rxq_index: RX queue index
3183  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3184  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3185  *
3186  * Drivers that implement ndo_rx_flow_steer() should periodically call
3187  * this function for each installed filter and remove the filters for
3188  * which it returns %true.
3189  */
3190 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3191                          u32 flow_id, u16 filter_id)
3192 {
3193         struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3194         struct rps_dev_flow_table *flow_table;
3195         struct rps_dev_flow *rflow;
3196         bool expire = true;
3197         int cpu;
3198
3199         rcu_read_lock();
3200         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3201         if (flow_table && flow_id <= flow_table->mask) {
3202                 rflow = &flow_table->flows[flow_id];
3203                 cpu = ACCESS_ONCE(rflow->cpu);
3204                 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3205                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3206                            rflow->last_qtail) <
3207                      (int)(10 * flow_table->mask)))
3208                         expire = false;
3209         }
3210         rcu_read_unlock();
3211         return expire;
3212 }
3213 EXPORT_SYMBOL(rps_may_expire_flow);
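
/* Editor's illustrative sketch, not part of dev.c: the periodic expiry scan
 * described in the kernel-doc above, as a driver implementing
 * ndo_rx_flow_steer() might write it (CONFIG_RFS_ACCEL only).  The
 * example_filter table and the surrounding driver code are hypothetical;
 * only the rps_may_expire_flow() call and its arguments are the real API.
 */
struct example_filter {
        u32 flow_id;            /* flow_id passed to ndo_rx_flow_steer() */
        u16 filter_id;          /* value returned by ndo_rx_flow_steer() */
        u16 rxq_index;
        bool in_use;
};

static void example_expire_filters(struct net_device *dev,
                                   struct example_filter *tbl,
                                   unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                if (!tbl[i].in_use)
                        continue;
                if (rps_may_expire_flow(dev, tbl[i].rxq_index,
                                        tbl[i].flow_id, tbl[i].filter_id)) {
                        /* remove the matching hardware filter here, then: */
                        tbl[i].in_use = false;
                }
        }
}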
3214
3215 #endif /* CONFIG_RFS_ACCEL */
3216
3217 /* Called from hardirq (IPI) context */
3218 static void rps_trigger_softirq(void *data)
3219 {
3220         struct softnet_data *sd = data;
3221
3222         ____napi_schedule(sd, &sd->backlog);
3223         sd->received_rps++;
3224 }
3225
3226 #endif /* CONFIG_RPS */
3227
3228 /*
3229  * Check if this softnet_data structure belongs to another CPU.
3230  * If it does, queue it on our IPI list and return 1.
3231  * If not, return 0.
3232  */
3233 static int rps_ipi_queued(struct softnet_data *sd)
3234 {
3235 #ifdef CONFIG_RPS
3236         struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3237
3238         if (sd != mysd) {
3239                 sd->rps_ipi_next = mysd->rps_ipi_list;
3240                 mysd->rps_ipi_list = sd;
3241
3242                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3243                 return 1;
3244         }
3245 #endif /* CONFIG_RPS */
3246         return 0;
3247 }
3248
3249 #ifdef CONFIG_NET_FLOW_LIMIT
3250 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3251 #endif
3252
3253 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3254 {
3255 #ifdef CONFIG_NET_FLOW_LIMIT
3256         struct sd_flow_limit *fl;
3257         struct softnet_data *sd;
3258         unsigned int old_flow, new_flow;
3259
3260         if (qlen < (netdev_max_backlog >> 1))
3261                 return false;
3262
3263         sd = this_cpu_ptr(&softnet_data);
3264
3265         rcu_read_lock();
3266         fl = rcu_dereference(sd->flow_limit);
3267         if (fl) {
3268                 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3269                 old_flow = fl->history[fl->history_head];
3270                 fl->history[fl->history_head] = new_flow;
3271
3272                 fl->history_head++;
3273                 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3274
3275                 if (likely(fl->buckets[old_flow]))
3276                         fl->buckets[old_flow]--;
3277
3278                 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3279                         fl->count++;
3280                         rcu_read_unlock();
3281                         return true;
3282                 }
3283         }
3284         rcu_read_unlock();
3285 #endif
3286         return false;
3287 }
3288
3289 /*
3290  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3291  * queue (may be a remote CPU queue).
3292  */
3293 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3294                               unsigned int *qtail)
3295 {
3296         struct softnet_data *sd;
3297         unsigned long flags;
3298         unsigned int qlen;
3299
3300         sd = &per_cpu(softnet_data, cpu);
3301
3302         local_irq_save(flags);
3303
3304         rps_lock(sd);
3305         qlen = skb_queue_len(&sd->input_pkt_queue);
3306         if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3307                 if (skb_queue_len(&sd->input_pkt_queue)) {
3308 enqueue:
3309                         __skb_queue_tail(&sd->input_pkt_queue, skb);
3310                         input_queue_tail_incr_save(sd, qtail);
3311                         rps_unlock(sd);
3312                         local_irq_restore(flags);
3313                         return NET_RX_SUCCESS;
3314                 }
3315
3316                 /* Schedule NAPI for the backlog device.
3317                  * We can use a non-atomic operation since we own the queue lock.
3318                  */
3319                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3320                         if (!rps_ipi_queued(sd))
3321                                 ____napi_schedule(sd, &sd->backlog);
3322                 }
3323                 goto enqueue;
3324         }
3325
3326         sd->dropped++;
3327         rps_unlock(sd);
3328
3329         local_irq_restore(flags);
3330
3331         atomic_long_inc(&skb->dev->rx_dropped);
3332         kfree_skb(skb);
3333         return NET_RX_DROP;
3334 }
3335
3336 static int netif_rx_internal(struct sk_buff *skb)
3337 {
3338         int ret;
3339
3340         net_timestamp_check(netdev_tstamp_prequeue, skb);
3341
3342         trace_netif_rx(skb);
3343 #ifdef CONFIG_RPS
3344         if (static_key_false(&rps_needed)) {
3345                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3346                 int cpu;
3347
3348                 preempt_disable();
3349                 rcu_read_lock();
3350
3351                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3352                 if (cpu < 0)
3353                         cpu = smp_processor_id();
3354
3355                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3356
3357                 rcu_read_unlock();
3358                 preempt_enable();
3359         } else
3360 #endif
3361         {
3362                 unsigned int qtail;
3363                 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3364                 put_cpu();
3365         }
3366         return ret;
3367 }
3368
3369 /**
3370  *      netif_rx        -       post buffer to the network code
3371  *      @skb: buffer to post
3372  *
3373  *      This function receives a packet from a device driver and queues it for
3374  *      the upper (protocol) levels to process.  It always succeeds. The buffer
3375  *      may be dropped during processing for congestion control or by the
3376  *      protocol layers.
3377  *
3378  *      return values:
3379  *      NET_RX_SUCCESS  (no congestion)
3380  *      NET_RX_DROP     (packet was dropped)
3381  *
3382  */
3383
3384 int netif_rx(struct sk_buff *skb)
3385 {
3386         trace_netif_rx_entry(skb);
3387
3388         return netif_rx_internal(skb);
3389 }
3390 EXPORT_SYMBOL(netif_rx);
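
/* Editor's illustrative sketch, not part of dev.c: the classic non-NAPI
 * receive path that ends in netif_rx(), e.g. run from a device interrupt
 * handler.  example_rx_frame() and its arguments are hypothetical; the
 * netdev_alloc_skb_ip_align() / eth_type_trans() / netif_rx() sequence is
 * the standard one.
 */
static void example_rx_frame(struct net_device *dev, const void *buf, int len)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(dev, len);
        if (!skb) {
                dev->stats.rx_dropped++;
                return;
        }
        memcpy(skb_put(skb, len), buf, len);
        skb->protocol = eth_type_trans(skb, dev);
        netif_rx(skb);          /* queue to the per-CPU backlog */
}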
3391
3392 int netif_rx_ni(struct sk_buff *skb)
3393 {
3394         int err;
3395
3396         trace_netif_rx_ni_entry(skb);
3397
3398         preempt_disable();
3399         err = netif_rx_internal(skb);
3400         if (local_softirq_pending())
3401                 do_softirq();
3402         preempt_enable();
3403
3404         return err;
3405 }
3406 EXPORT_SYMBOL(netif_rx_ni);
3407
3408 static void net_tx_action(struct softirq_action *h)
3409 {
3410         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3411
3412         if (sd->completion_queue) {
3413                 struct sk_buff *clist;
3414
3415                 local_irq_disable();
3416                 clist = sd->completion_queue;
3417                 sd->completion_queue = NULL;
3418                 local_irq_enable();
3419
3420                 while (clist) {
3421                         struct sk_buff *skb = clist;
3422                         clist = clist->next;
3423
3424                         WARN_ON(atomic_read(&skb->users));
3425                         if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3426                                 trace_consume_skb(skb);
3427                         else
3428                                 trace_kfree_skb(skb, net_tx_action);
3429                         __kfree_skb(skb);
3430                 }
3431         }
3432
3433         if (sd->output_queue) {
3434                 struct Qdisc *head;
3435
3436                 local_irq_disable();
3437                 head = sd->output_queue;
3438                 sd->output_queue = NULL;
3439                 sd->output_queue_tailp = &sd->output_queue;
3440                 local_irq_enable();
3441
3442                 while (head) {
3443                         struct Qdisc *q = head;
3444                         spinlock_t *root_lock;
3445
3446                         head = head->next_sched;
3447
3448                         root_lock = qdisc_lock(q);
3449                         if (spin_trylock(root_lock)) {
3450                                 smp_mb__before_atomic();
3451                                 clear_bit(__QDISC_STATE_SCHED,
3452                                           &q->state);
3453                                 qdisc_run(q);
3454                                 spin_unlock(root_lock);
3455                         } else {
3456                                 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3457                                               &q->state)) {
3458                                         __netif_reschedule(q);
3459                                 } else {
3460                                         smp_mb__before_atomic();
3461                                         clear_bit(__QDISC_STATE_SCHED,
3462                                                   &q->state);
3463                                 }
3464                         }
3465                 }
3466         }
3467 }
3468
3469 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3470     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3471 /* This hook is defined here for ATM LANE */
3472 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3473                              unsigned char *addr) __read_mostly;
3474 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3475 #endif
3476
3477 #ifdef CONFIG_NET_CLS_ACT
3478 /* TODO: Maybe we should just force sch_ingress to be compiled in
3479  * when CONFIG_NET_CLS_ACT is?  Otherwise we currently pay for some
3480  * useless instructions (a compare and two extra stores) when
3481  * CONFIG_NET_CLS_ACT is set but the ingress scheduler is not.
3482  * NOTE: This doesn't stop any functionality; if you don't have
3483  * the ingress scheduler, you just can't add policies on ingress.
3484  *
3485  */
3486 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3487 {
3488         struct net_device *dev = skb->dev;
3489         u32 ttl = G_TC_RTTL(skb->tc_verd);
3490         int result = TC_ACT_OK;
3491         struct Qdisc *q;
3492
3493         if (unlikely(MAX_RED_LOOP < ttl++)) {
3494                 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3495                                      skb->skb_iif, dev->ifindex);
3496                 return TC_ACT_SHOT;
3497         }
3498
3499         skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3500         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3501
3502         q = rcu_dereference(rxq->qdisc);
3503         if (q != &noop_qdisc) {
3504                 spin_lock(qdisc_lock(q));
3505                 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3506                         result = qdisc_enqueue_root(skb, q);
3507                 spin_unlock(qdisc_lock(q));
3508         }
3509
3510         return result;
3511 }
3512
3513 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3514                                          struct packet_type **pt_prev,
3515                                          int *ret, struct net_device *orig_dev)
3516 {
3517         struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3518
3519         if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
3520                 goto out;
3521
3522         if (*pt_prev) {
3523                 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3524                 *pt_prev = NULL;
3525         }
3526
3527         switch (ing_filter(skb, rxq)) {
3528         case TC_ACT_SHOT:
3529         case TC_ACT_STOLEN:
3530                 kfree_skb(skb);
3531                 return NULL;
3532         }
3533
3534 out:
3535         skb->tc_verd = 0;
3536         return skb;
3537 }
3538 #endif
3539
3540 /**
3541  *      netdev_rx_handler_register - register receive handler
3542  *      @dev: device to register a handler for
3543  *      @rx_handler: receive handler to register
3544  *      @rx_handler_data: data pointer that is used by rx handler
3545  *
3546  *      Register a receive handler for a device. This handler will then be
3547  *      called from __netif_receive_skb. A negative errno code is returned
3548  *      on a failure.
3549  *
3550  *      The caller must hold the rtnl_mutex.
3551  *
3552  *      For a general description of rx_handler, see enum rx_handler_result.
3553  */
3554 int netdev_rx_handler_register(struct net_device *dev,
3555                                rx_handler_func_t *rx_handler,
3556                                void *rx_handler_data)
3557 {
3558         ASSERT_RTNL();
3559
3560         if (dev->rx_handler)
3561                 return -EBUSY;
3562
3563         /* Note: rx_handler_data must be set before rx_handler */
3564         rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3565         rcu_assign_pointer(dev->rx_handler, rx_handler);
3566
3567         return 0;
3568 }
3569 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
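
/* Editor's illustrative sketch, not part of dev.c: how a bridging/bonding
 * style master might attach an rx_handler to one of its ports.  The
 * example_port structure and helper names are hypothetical; the rtnl
 * locking requirement, the rx_handler_func_t signature and the
 * RX_HANDLER_* return values (see enum rx_handler_result) are the real
 * contract.
 */
struct example_port {
        struct net_device *upper_dev;   /* the master device */
};

static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;
        struct example_port *port = rcu_dereference(skb->dev->rx_handler_data);

        if (!port || !port->upper_dev)
                return RX_HANDLER_PASS;         /* let the normal stack see it */

        skb->dev = port->upper_dev;             /* redirect to the master */
        return RX_HANDLER_ANOTHER;              /* another __netif_receive_skb_core round */
}

static int example_attach_port(struct net_device *port_dev,
                               struct example_port *port)
{
        int err;

        rtnl_lock();
        err = netdev_rx_handler_register(port_dev, example_handle_frame, port);
        rtnl_unlock();
        return err;
}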
3570
3571 /**
3572  *      netdev_rx_handler_unregister - unregister receive handler
3573  *      @dev: device to unregister a handler from
3574  *
3575  *      Unregister a receive handler from a device.
3576  *
3577  *      The caller must hold the rtnl_mutex.
3578  */
3579 void netdev_rx_handler_unregister(struct net_device *dev)
3580 {
3581
3582         ASSERT_RTNL();
3583         RCU_INIT_POINTER(dev->rx_handler, NULL);
3584         /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3585          * section is guaranteed to see a non-NULL rx_handler_data
3586          * as well.
3587          */
3588         synchronize_net();
3589         RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3590 }
3591 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
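
/* Editor's illustrative sketch, not part of dev.c: tearing down the handler
 * registered in the example above.  netdev_rx_handler_unregister() does the
 * synchronize_net() itself, so (per the comment in that function) freeing
 * the private data once it has returned should be safe; the helper name is
 * hypothetical.
 */
static void example_detach_port(struct net_device *port_dev,
                                struct example_port *port)
{
        rtnl_lock();
        netdev_rx_handler_unregister(port_dev);
        rtnl_unlock();
        kfree(port);
}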
3592
3593 /*
3594  * Limit the use of PFMEMALLOC reserves to those protocols that implement
3595  * the special handling of PFMEMALLOC skbs.
3596  */
3597 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3598 {
3599         switch (skb->protocol) {
3600         case htons(ETH_P_ARP):
3601         case htons(ETH_P_IP):
3602         case htons(ETH_P_IPV6):
3603         case htons(ETH_P_8021Q):
3604         case htons(ETH_P_8021AD):
3605                 return true;
3606         default:
3607                 return false;
3608         }
3609 }
3610
3611 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3612 {
3613         struct packet_type *ptype, *pt_prev;
3614         rx_handler_func_t *rx_handler;
3615         struct net_device *orig_dev;
3616         struct net_device *null_or_dev;
3617         bool deliver_exact = false;
3618         int ret = NET_RX_DROP;
3619         __be16 type;
3620
3621         net_timestamp_check(!netdev_tstamp_prequeue, skb);
3622
3623         trace_netif_receive_skb(skb);
3624
3625         orig_dev = skb->dev;
3626
3627         skb_reset_network_header(skb);
3628         if (!skb_transport_header_was_set(skb))
3629                 skb_reset_transport_header(skb);
3630         skb_reset_mac_len(skb);
3631
3632         pt_prev = NULL;
3633
3634         rcu_read_lock();
3635
3636 another_round:
3637         skb->skb_iif = skb->dev->ifindex;
3638
3639         __this_cpu_inc(softnet_data.processed);
3640
3641         if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3642             skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3643                 skb = skb_vlan_untag(skb);
3644                 if (unlikely(!skb))
3645                         goto unlock;
3646         }
3647
3648 #ifdef CONFIG_NET_CLS_ACT
3649         if (skb->tc_verd & TC_NCLS) {
3650                 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3651                 goto ncls;
3652         }
3653 #endif
3654
3655         if (pfmemalloc)
3656                 goto skip_taps;
3657
3658         list_for_each_entry_rcu(ptype, &ptype_all, list) {
3659                 if (!ptype->dev || ptype->dev == skb->dev) {
3660                         if (pt_prev)
3661                                 ret = deliver_skb(skb, pt_prev, orig_dev);
3662                         pt_prev = ptype;
3663                 }
3664         }
3665
3666 skip_taps:
3667 #ifdef CONFIG_NET_CLS_ACT
3668         skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3669         if (!skb)
3670                 goto unlock;
3671 ncls:
3672 #endif
3673
3674         if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3675                 goto drop;
3676
3677         if (vlan_tx_tag_present(skb)) {
3678                 if (pt_prev) {
3679                         ret = deliver_skb(skb, pt_prev, orig_dev);
3680                         pt_prev = NULL;
3681                 }
3682                 if (vlan_do_receive(&skb))
3683                         goto another_round;
3684                 else if (unlikely(!skb))
3685                         goto unlock;
3686         }
3687
3688         rx_handler = rcu_dereference(skb->dev->rx_handler);
3689         if (rx_handler) {
3690                 if (pt_prev) {
3691                         ret = deliver_skb(skb, pt_prev, orig_dev);
3692                         pt_prev = NULL;
3693                 }
3694                 switch (rx_handler(&skb)) {
3695                 case RX_HANDLER_CONSUMED:
3696                         ret = NET_RX_SUCCESS;
3697                         goto unlock;
3698                 case RX_HANDLER_ANOTHER:
3699                         goto another_round;
3700                 case RX_HANDLER_EXACT:
3701                         deliver_exact = true;
3702                 case RX_HANDLER_PASS:
3703                         break;
3704                 default:
3705                         BUG();
3706                 }
3707         }
3708
3709         if (unlikely(vlan_tx_tag_present(skb))) {
3710                 if (vlan_tx_tag_get_id(skb))
3711                         skb->pkt_type = PACKET_OTHERHOST;
3712                 /* Note: we might in the future use prio bits
3713                  * and set skb->priority like in vlan_do_receive().
3714                  * For the time being, just ignore the Priority Code Point.
3715                  */
3716                 skb->vlan_tci = 0;
3717         }
3718
3719         /* deliver only exact match when indicated */
3720         null_or_dev = deliver_exact ? skb->dev : NULL;
3721
3722         type = skb->protocol;
3723         list_for_each_entry_rcu(ptype,
3724                         &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3725                 if (ptype->type == type &&
3726                     (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3727                      ptype->dev == orig_dev)) {
3728                         if (pt_prev)
3729                                 ret = deliver_skb(skb, pt_prev, orig_dev);
3730                         pt_prev = ptype;
3731                 }
3732         }
3733
3734         if (pt_prev) {
3735                 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3736                         goto drop;
3737                 else
3738                         ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3739         } else {
3740 drop:
3741                 atomic_long_inc(&skb->dev->rx_dropped);
3742                 kfree_skb(skb);
3743                 /* Jamal, now you will not be able to escape explaining
3744                  * to me how you were going to use this. :-)
3745                  */
3746                 ret = NET_RX_DROP;
3747         }
3748
3749 unlock:
3750         rcu_read_unlock();
3751         return ret;
3752 }
3753
3754 static int __netif_receive_skb(struct sk_buff *skb)
3755 {
3756         int ret;
3757
3758         if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3759                 unsigned long pflags = current->flags;
3760
3761                 /*
3762                  * PFMEMALLOC skbs are special, they should
3763                  * - be delivered to SOCK_MEMALLOC sockets only
3764                  * - stay away from userspace
3765                  * - have bounded memory usage
3766                  *
3767                  * Use PF_MEMALLOC as this saves us from propagating the allocation
3768                  * context down to all allocation sites.
3769                  */
3770                 current->flags |= PF_MEMALLOC;
3771                 ret = __netif_receive_skb_core(skb, true);
3772                 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3773         } else
3774                 ret = __netif_receive_skb_core(skb, false);
3775
3776         return ret;
3777 }
3778
3779 static int netif_receive_skb_internal(struct sk_buff *skb)
3780 {
3781         net_timestamp_check(netdev_tstamp_prequeue, skb);
3782
3783         if (skb_defer_rx_timestamp(skb))
3784                 return NET_RX_SUCCESS;
3785
3786 #ifdef CONFIG_RPS
3787         if (static_key_false(&rps_needed)) {
3788                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3789                 int cpu, ret;
3790
3791                 rcu_read_lock();
3792
3793                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3794
3795                 if (cpu >= 0) {
3796                         ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3797                         rcu_read_unlock();
3798                         return ret;
3799                 }
3800                 rcu_read_unlock();
3801         }
3802 #endif
3803         return __netif_receive_skb(skb);
3804 }
3805
3806 /**
3807  *      netif_receive_skb - process receive buffer from network
3808  *      @skb: buffer to process
3809  *
3810  *      netif_receive_skb() is the main receive data processing function.
3811  *      It always succeeds. The buffer may be dropped during processing
3812  *      for congestion control or by the protocol layers.
3813  *
3814  *      This function may only be called from softirq context and interrupts
3815  *      should be enabled.
3816  *
3817  *      Return values (usually ignored):
3818  *      NET_RX_SUCCESS: no congestion
3819  *      NET_RX_DROP: packet was dropped
3820  */
3821 int netif_receive_skb(struct sk_buff *skb)
3822 {
3823         trace_netif_receive_skb_entry(skb);
3824
3825         return netif_receive_skb_internal(skb);
3826 }
3827 EXPORT_SYMBOL(netif_receive_skb);
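
/* Editor's illustrative sketch, not part of dev.c: a minimal NAPI ->poll()
 * routine delivering frames with netif_receive_skb(), which - as the
 * kernel-doc above notes - may only be called from softirq context.  The
 * example_priv structure and example_fetch_skb() (returning one completed
 * frame from a hypothetical RX ring, or NULL when it is empty) do not
 * exist; napi_complete() and the budget contract are real.
 */
struct example_priv {
        struct napi_struct napi;
};

static struct sk_buff *example_fetch_skb(struct example_priv *priv);

static int example_poll(struct napi_struct *napi, int budget)
{
        struct example_priv *priv = container_of(napi, struct example_priv, napi);
        int work = 0;

        while (work < budget) {
                struct sk_buff *skb = example_fetch_skb(priv);

                if (!skb)
                        break;
                netif_receive_skb(skb);
                work++;
        }

        if (work < budget) {
                napi_complete(napi);
                /* re-enable the device's RX interrupt here */
        }
        return work;
}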
3828
3829 /* Network device is going away; flush any packets still pending.
3830  * Called with irqs disabled.
3831  */
3832 static void flush_backlog(void *arg)
3833 {
3834         struct net_device *dev = arg;
3835         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3836         struct sk_buff *skb, *tmp;
3837
3838         rps_lock(sd);
3839         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3840                 if (skb->dev == dev) {
3841                         __skb_unlink(skb, &sd->input_pkt_queue);
3842                         kfree_skb(skb);
3843                         input_queue_head_incr(sd);
3844                 }
3845         }
3846         rps_unlock(sd);
3847
3848         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3849                 if (skb->dev == dev) {
3850                         __skb_unlink(skb, &sd->process_queue);
3851                         kfree_skb(skb);
3852                         input_queue_head_incr(sd);
3853                 }
3854         }
3855 }
3856
3857 static int napi_gro_complete(struct sk_buff *skb)
3858 {
3859         struct packet_offload *ptype;
3860         __be16 type = skb->protocol;
3861         struct list_head *head = &offload_base;
3862         int err = -ENOENT;
3863
3864         BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3865
3866         if (NAPI_GRO_CB(skb)->count == 1) {
3867                 skb_shinfo(skb)->gso_size = 0;
3868                 goto out;
3869         }
3870
3871         rcu_read_lock();
3872         list_for_each_entry_rcu(ptype, head, list) {
3873                 if (ptype->type != type || !ptype->callbacks.gro_complete)
3874                         continue;
3875
3876                 err = ptype->callbacks.gro_complete(skb, 0);
3877                 break;
3878         }
3879         rcu_read_unlock();
3880
3881         if (err) {
3882                 WARN_ON(&ptype->list == head);
3883                 kfree_skb(skb);
3884                 return NET_RX_SUCCESS;
3885         }
3886
3887 out:
3888         return netif_receive_skb_internal(skb);
3889 }
3890
3891 /* napi->gro_list contains packets ordered by age, with the
3892  * youngest packets at the head of the list.
3893  * Complete skbs in reverse order to reduce latencies.
3894  */
3895 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3896 {
3897         struct sk_buff *skb, *prev = NULL;
3898
3899         /* scan list and build reverse chain */
3900         for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3901                 skb->prev = prev;
3902                 prev = skb;
3903         }
3904
3905         for (skb = prev; skb; skb = prev) {
3906                 skb->next = NULL;
3907
3908                 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3909                         return;
3910
3911                 prev = skb->prev;
3912                 napi_gro_complete(skb);
3913                 napi->gro_count--;
3914         }
3915
3916         napi->gro_list = NULL;
3917 }
3918 EXPORT_SYMBOL(napi_gro_flush);
3919
3920 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3921 {
3922         struct sk_buff *p;
3923         unsigned int maclen = skb->dev->hard_header_len;
3924         u32 hash = skb_get_hash_raw(skb);
3925
3926         for (p = napi->gro_list; p; p = p->next) {
3927                 unsigned long diffs;
3928
3929                 NAPI_GRO_CB(p)->flush = 0;
3930
3931                 if (hash != skb_get_hash_raw(p)) {
3932                         NAPI_GRO_CB(p)->same_flow = 0;
3933                         continue;
3934                 }
3935
3936                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3937                 diffs |= p->vlan_tci ^ skb->vlan_tci;
3938                 if (maclen == ETH_HLEN)
3939                         diffs |= compare_ether_header(skb_mac_header(p),
3940                                                       skb_mac_header(skb));
3941                 else if (!diffs)
3942                         diffs = memcmp(skb_mac_header(p),
3943                                        skb_mac_header(skb),
3944                                        maclen);
3945                 NAPI_GRO_CB(p)->same_flow = !diffs;
3946         }
3947 }
3948
3949 static void skb_gro_reset_offset(struct sk_buff *skb)
3950 {
3951         const struct skb_shared_info *pinfo = skb_shinfo(skb);
3952         const skb_frag_t *frag0 = &pinfo->frags[0];
3953
3954         NAPI_GRO_CB(skb)->data_offset = 0;
3955         NAPI_GRO_CB(skb)->frag0 = NULL;
3956         NAPI_GRO_CB(skb)->frag0_len = 0;
3957
3958         if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3959             pinfo->nr_frags &&
3960             !PageHighMem(skb_frag_page(frag0))) {
3961                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3962                 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3963         }
3964 }
3965
3966 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
3967 {
3968         struct skb_shared_info *pinfo = skb_shinfo(skb);
3969
3970         BUG_ON(skb->end - skb->tail < grow);
3971
3972         memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3973
3974         skb->data_len -= grow;
3975         skb->tail += grow;
3976
3977         pinfo->frags[0].page_offset += grow;
3978         skb_frag_size_sub(&pinfo->frags[0], grow);
3979
3980         if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
3981                 skb_frag_unref(skb, 0);
3982                 memmove(pinfo->frags, pinfo->frags + 1,
3983                         --pinfo->nr_frags * sizeof(pinfo->frags[0]));
3984         }
3985 }
3986
3987 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3988 {
3989         struct sk_buff **pp = NULL;
3990         struct packet_offload *ptype;
3991         __be16 type = skb->protocol;
3992         struct list_head *head = &offload_base;
3993         int same_flow;
3994         enum gro_result ret;
3995         int grow;
3996
3997         if (!(skb->dev->features & NETIF_F_GRO))
3998                 goto normal;
3999
4000         if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4001                 goto normal;
4002
4003         gro_list_prepare(napi, skb);
4004
4005         rcu_read_lock();
4006         list_for_each_entry_rcu(ptype, head, list) {
4007                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4008                         continue;
4009
4010                 skb_set_network_header(skb, skb_gro_offset(skb));
4011                 skb_reset_mac_len(skb);
4012                 NAPI_GRO_CB(skb)->same_flow = 0;
4013                 NAPI_GRO_CB(skb)->flush = 0;
4014                 NAPI_GRO_CB(skb)->free = 0;
4015                 NAPI_GRO_CB(skb)->udp_mark = 0;
4016
4017                 /* Setup for GRO checksum validation */
4018                 switch (skb->ip_summed) {
4019                 case CHECKSUM_COMPLETE:
4020                         NAPI_GRO_CB(skb)->csum = skb->csum;
4021                         NAPI_GRO_CB(skb)->csum_valid = 1;
4022                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4023                         break;
4024                 case CHECKSUM_UNNECESSARY:
4025                         NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4026                         NAPI_GRO_CB(skb)->csum_valid = 0;
4027                         break;
4028                 default:
4029                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4030                         NAPI_GRO_CB(skb)->csum_valid = 0;
4031                 }
4032
4033                 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4034                 break;
4035         }
4036         rcu_read_unlock();
4037
4038         if (&ptype->list == head)
4039                 goto normal;
4040
4041         same_flow = NAPI_GRO_CB(skb)->same_flow;
4042         ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4043
4044         if (pp) {
4045                 struct sk_buff *nskb = *pp;
4046
4047                 *pp = nskb->next;
4048                 nskb->next = NULL;
4049                 napi_gro_complete(nskb);
4050                 napi->gro_count--;
4051         }
4052
4053         if (same_flow)
4054                 goto ok;
4055
4056         if (NAPI_GRO_CB(skb)->flush)
4057                 goto normal;
4058
4059         if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4060                 struct sk_buff *nskb = napi->gro_list;
4061
4062                 /* locate the end of the list to select the 'oldest' flow */
4063                 while (nskb->next) {
4064                         pp = &nskb->next;
4065                         nskb = *pp;
4066                 }
4067                 *pp = NULL;
4068                 nskb->next = NULL;
4069                 napi_gro_complete(nskb);
4070         } else {
4071                 napi->gro_count++;
4072         }
4073         NAPI_GRO_CB(skb)->count = 1;
4074         NAPI_GRO_CB(skb)->age = jiffies;
4075         NAPI_GRO_CB(skb)->last = skb;
4076         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4077         skb->next = napi->gro_list;
4078         napi->gro_list = skb;
4079         ret = GRO_HELD;
4080
4081 pull:
4082         grow = skb_gro_offset(skb) - skb_headlen(skb);
4083         if (grow > 0)
4084                 gro_pull_from_frag0(skb, grow);
4085 ok:
4086         return ret;
4087
4088 normal:
4089         ret = GRO_NORMAL;
4090         goto pull;
4091 }
4092
4093 struct packet_offload *gro_find_receive_by_type(__be16 type)
4094 {
4095         struct list_head *offload_head = &offload_base;
4096         struct packet_offload *ptype;
4097
4098         list_for_each_entry_rcu(ptype, offload_head, list) {
4099                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4100                         continue;
4101                 return ptype;
4102         }
4103         return NULL;
4104 }
4105 EXPORT_SYMBOL(gro_find_receive_by_type);
4106
4107 struct packet_offload *gro_find_complete_by_type(__be16 type)
4108 {
4109         struct list_head *offload_head = &offload_base;
4110         struct packet_offload *ptype;
4111
4112         list_for_each_entry_rcu(ptype, offload_head, list) {
4113                 if (ptype->type != type || !ptype->callbacks.gro_complete)
4114                         continue;
4115                 return ptype;
4116         }
4117         return NULL;
4118 }
4119 EXPORT_SYMBOL(gro_find_complete_by_type);
4120
4121 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4122 {
4123         switch (ret) {
4124         case GRO_NORMAL:
4125                 if (netif_receive_skb_internal(skb))
4126                         ret = GRO_DROP;
4127                 break;
4128
4129         case GRO_DROP:
4130                 kfree_skb(skb);
4131                 break;
4132
4133         case GRO_MERGED_FREE:
4134                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4135                         kmem_cache_free(skbuff_head_cache, skb);
4136                 else
4137                         __kfree_skb(skb);
4138                 break;
4139
4140         case GRO_HELD:
4141         case GRO_MERGED:
4142                 break;
4143         }
4144
4145         return ret;
4146 }
4147
4148 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4149 {
4150         trace_napi_gro_receive_entry(skb);
4151
4152         skb_gro_reset_offset(skb);
4153
4154         return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4155 }
4156 EXPORT_SYMBOL(napi_gro_receive);
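
/* Editor's illustrative sketch, not part of dev.c: in a GRO-capable driver
 * the delivery step of the ->poll() loop (compare the netif_receive_skb()
 * sketch earlier) becomes napi_gro_receive(), letting dev_gro_receive()
 * above try to merge the segment first.  The helper name is hypothetical,
 * and skb->protocol is assumed to be set already (e.g. by eth_type_trans()).
 */
static void example_deliver_gro(struct napi_struct *napi, struct sk_buff *skb)
{
        napi_gro_receive(napi, skb);
}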
4157
4158 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4159 {
4160         if (unlikely(skb->pfmemalloc)) {
4161                 consume_skb(skb);
4162                 return;
4163         }
4164         __skb_pull(skb, skb_headlen(skb));
4165         /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4166         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4167         skb->vlan_tci = 0;
4168         skb->dev = napi->dev;
4169         skb->skb_iif = 0;
4170         skb->encapsulation = 0;
4171         skb_shinfo(skb)->gso_type = 0;
4172         skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4173
4174         napi->skb = skb;
4175 }
4176
4177 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4178 {
4179         struct sk_buff *skb = napi->skb;
4180
4181         if (!skb) {
4182                 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
4183                 napi->skb = skb;
4184         }
4185         return skb;
4186 }
4187 EXPORT_SYMBOL(napi_get_frags);
4188
4189 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4190                                       struct sk_buff *skb,
4191                                       gro_result_t ret)
4192 {
4193         switch (ret) {
4194         case GRO_NORMAL:
4195         case GRO_HELD:
4196                 __skb_push(skb, ETH_HLEN);
4197                 skb->protocol = eth_type_trans(skb, skb->dev);
4198                 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4199                         ret = GRO_DROP;
4200                 break;
4201
4202         case GRO_DROP:
4203         case GRO_MERGED_FREE:
4204                 napi_reuse_skb(napi, skb);
4205                 break;
4206
4207         case GRO_MERGED:
4208                 break;
4209         }
4210
4211         return ret;
4212 }
4213
4214 /* The upper GRO stack assumes the network header starts at gro_offset=0.
4215  * Drivers may call both napi_gro_frags() and napi_gro_receive(),
4216  * so we copy the ethernet header into skb->data to have a common layout.
4217  */
4218 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4219 {
4220         struct sk_buff *skb = napi->skb;
4221         const struct ethhdr *eth;
4222         unsigned int hlen = sizeof(*eth);
4223
4224         napi->skb = NULL;
4225
4226         skb_reset_mac_header(skb);
4227         skb_gro_reset_offset(skb);
4228
4229         eth = skb_gro_header_fast(skb, 0);
4230         if (unlikely(skb_gro_header_hard(skb, hlen))) {
4231                 eth = skb_gro_header_slow(skb, hlen, 0);
4232                 if (unlikely(!eth)) {
4233                         napi_reuse_skb(napi, skb);
4234                         return NULL;
4235                 }
4236         } else {
4237                 gro_pull_from_frag0(skb, hlen);
4238                 NAPI_GRO_CB(skb)->frag0 += hlen;
4239                 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4240         }
4241         __skb_pull(skb, hlen);
4242
4243         /*
4244          * This works because the only protocols we care about don't require
4245          * special handling.
4246          * We'll fix it up properly in napi_frags_finish()
4247          */
4248         skb->protocol = eth->h_proto;
4249
4250         return skb;
4251 }
4252
4253 gro_result_t napi_gro_frags(struct napi_struct *napi)
4254 {
4255         struct sk_buff *skb = napi_frags_skb(napi);
4256
4257         if (!skb)
4258                 return GRO_DROP;
4259
4260         trace_napi_gro_frags_entry(skb);
4261
4262         return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4263 }
4264 EXPORT_SYMBOL(napi_gro_frags);
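
/* Editor's illustrative sketch, not part of dev.c: the napi_get_frags() /
 * napi_gro_frags() pairing used by drivers that receive directly into
 * pages.  The page/offset/len values would come from a hardware descriptor
 * and the PAGE_SIZE truesize is an assumption; note that napi_gro_frags()
 * reads the Ethernet header itself (napi_frags_skb() above), so
 * eth_type_trans() is not called here.
 */
static void example_rx_page(struct napi_struct *napi, struct page *page,
                            unsigned int offset, unsigned int len)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (!skb) {
                put_page(page);
                return;
        }
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
                        PAGE_SIZE);
        napi_gro_frags(napi);
}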
4265
4266 /* Compute the checksum from gro_offset and return the folded value
4267  * after adding in any pseudo checksum.
4268  */
4269 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4270 {
4271         __wsum wsum;
4272         __sum16 sum;
4273
4274         wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4275
4276         /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4277         sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4278         if (likely(!sum)) {
4279                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4280                     !skb->csum_complete_sw)
4281                         netdev_rx_csum_fault(skb->dev);
4282         }
4283
4284         NAPI_GRO_CB(skb)->csum = wsum;
4285         NAPI_GRO_CB(skb)->csum_valid = 1;
4286
4287         return sum;
4288 }
4289 EXPORT_SYMBOL(__skb_gro_checksum_complete);
4290
4291 /*
4292  * net_rps_action_and_irq_enable() sends any pending IPIs for RPS.
4293  * Note: called with local irq disabled, but exits with local irq enabled.
4294  */
4295 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4296 {
4297 #ifdef CONFIG_RPS
4298         struct softnet_data *remsd = sd->rps_ipi_list;
4299
4300         if (remsd) {
4301                 sd->rps_ipi_list = NULL;
4302
4303                 local_irq_enable();
4304
4305                 /* Send pending IPIs to kick RPS processing on remote cpus. */
4306                 while (remsd) {
4307                         struct softnet_data *next = remsd->rps_ipi_next;
4308
4309                         if (cpu_online(remsd->cpu))
4310                                 smp_call_function_single_async(remsd->cpu,
4311                                                            &remsd->csd);
4312                         remsd = next;
4313                 }
4314         } else
4315 #endif
4316                 local_irq_enable();
4317 }
4318
4319 static int process_backlog(struct napi_struct *napi, int quota)
4320 {
4321         int work = 0;
4322         struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4323
4324 #ifdef CONFIG_RPS
4325         /* Check if we have pending IPIs; it is better to send them now
4326          * rather than waiting for net_rx_action() to end.
4327          */
4328         if (sd->rps_ipi_list) {
4329                 local_irq_disable();
4330                 net_rps_action_and_irq_enable(sd);
4331         }
4332 #endif
4333         napi->weight = weight_p;
4334         local_irq_disable();
4335         while (1) {
4336                 struct sk_buff *skb;
4337
4338                 while ((skb = __skb_dequeue(&sd->process_queue))) {
4339                         local_irq_enable();
4340                         __netif_receive_skb(skb);
4341                         local_irq_disable();
4342                         input_queue_head_incr(sd);
4343                         if (++work >= quota) {
4344                                 local_irq_enable();
4345                                 return work;
4346                         }
4347                 }
4348
4349                 rps_lock(sd);
4350                 if (skb_queue_empty(&sd->input_pkt_queue)) {
4351                         /*
4352                          * Inline a custom version of __napi_complete().
4353                          * Only the current cpu owns and manipulates this napi,
4354                          * and NAPI_STATE_SCHED is the only possible flag set
4355                          * on backlog.
4356                          * We can use a plain write instead of clear_bit(),
4357                          * and we don't need an smp_mb() memory barrier.
4358                          */
4359                         list_del(&napi->poll_list);
4360                         napi->state = 0;
4361                         rps_unlock(sd);
4362
4363                         break;
4364                 }
4365
4366                 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4367                                            &sd->process_queue);
4368                 rps_unlock(sd);
4369         }
4370         local_irq_enable();
4371
4372         return work;
4373 }
4374
4375 /**
4376  * __napi_schedule - schedule for receive
4377  * @n: entry to schedule
4378  *
4379  * The entry's receive function will be scheduled to run
4380  */
4381 void __napi_schedule(struct napi_struct *n)
4382 {
4383         unsigned long flags;
4384
4385         local_irq_save(flags);
4386         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4387         local_irq_restore(flags);
4388 }
4389 EXPORT_SYMBOL(__napi_schedule);
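
/* Editor's illustrative sketch, not part of dev.c: the interrupt-handler
 * half of NAPI.  A driver typically masks its own RX interrupt and then
 * schedules the poll; napi_schedule_prep() and __napi_schedule() are the
 * real API, while example_priv is the hypothetical structure from the
 * earlier ->poll() sketch.
 */
static irqreturn_t example_interrupt(int irq, void *data)
{
        struct example_priv *priv = data;

        if (napi_schedule_prep(&priv->napi)) {
                /* mask further RX interrupts on the device here */
                __napi_schedule(&priv->napi);
        }
        return IRQ_HANDLED;
}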
4390
4391 void __napi_complete(struct napi_struct *n)
4392 {
4393         BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4394         BUG_ON(n->gro_list);
4395
4396         list_del(&n->poll_list);
4397         smp_mb__before_atomic();
4398         clear_bit(NAPI_STATE_SCHED, &n->state);
4399 }
4400 EXPORT_SYMBOL(__napi_complete);
4401
4402 void napi_complete(struct napi_struct *n)
4403 {
4404         unsigned long flags;
4405
4406         /*
4407          * Don't let napi dequeue from the cpu poll list
4408          * just in case it's running on a different cpu.
4409          */
4410         if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4411                 return;
4412
4413         napi_gro_flush(n, false);
4414         local_irq_save(flags);
4415         __napi_complete(n);
4416         local_irq_restore(flags);
4417 }
4418 EXPORT_SYMBOL(napi_complete);
4419
4420 /* must be called under rcu_read_lock(), as we don't take a reference */
4421 struct napi_struct *napi_by_id(unsigned int napi_id)
4422 {
4423         unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4424         struct napi_struct *napi;
4425
4426         hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4427                 if (napi->napi_id == napi_id)
4428                         return napi;
4429
4430         return NULL;
4431 }
4432 EXPORT_SYMBOL_GPL(napi_by_id);
4433
4434 void napi_hash_add(struct napi_struct *napi)
4435 {
4436         if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4437
4438                 spin_lock(&napi_hash_lock);
4439
4440                 /* 0 is not a valid id; we also skip an id that is already taken.
4441                  * We expect both events to be extremely rare.
4442                  */
4443                 napi->napi_id = 0;
4444                 while (!napi->napi_id) {
4445                         napi->napi_id = ++napi_gen_id;
4446                         if (napi_by_id(napi->napi_id))
4447                                 napi->napi_id = 0;
4448                 }
4449
4450                 hlist_add_head_rcu(&napi->napi_hash_node,
4451                         &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4452
4453                 spin_unlock(&napi_hash_lock);
4454         }
4455 }
4456 EXPORT_SYMBOL_GPL(napi_hash_add);
4457
4458 /* Warning: the caller is responsible for making sure an rcu grace period
4459  * is respected before freeing the memory containing @napi.
4460  */
4461 void napi_hash_del(struct napi_struct *napi)
4462 {
4463         spin_lock(&napi_hash_lock);
4464
4465         if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4466                 hlist_del_rcu(&napi->napi_hash_node);
4467
4468         spin_unlock(&napi_hash_lock);
4469 }
4470 EXPORT_SYMBOL_GPL(napi_hash_del);
4471
4472 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4473                     int (*poll)(struct napi_struct *, int), int weight)
4474 {
4475         INIT_LIST_HEAD(&napi->poll_list);
4476         napi->gro_count = 0;
4477         napi->gro_list = NULL;
4478         napi->skb = NULL;
4479         napi->poll = poll;
4480         if (weight > NAPI_POLL_WEIGHT)
4481                 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4482                             weight, dev->name);
4483         napi->weight = weight;
4484         list_add(&napi->dev_list, &dev->napi_list);
4485         napi->dev = dev;
4486 #ifdef CONFIG_NETPOLL
4487         spin_lock_init(&napi->poll_lock);
4488         napi->poll_owner = -1;
4489 #endif
4490         set_bit(NAPI_STATE_SCHED, &napi->state);
4491 }
4492 EXPORT_SYMBOL(netif_napi_add);
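
/* Editor's illustrative sketch, not part of dev.c: registering the NAPI
 * context at probe time.  NAPI_POLL_WEIGHT is the recommended weight (the
 * pr_err_once() above fires for anything larger); example_priv and
 * example_poll are the hypothetical names from the earlier sketches, and
 * napi_enable() would then be called from ndo_open().
 */
static void example_setup_napi(struct net_device *dev,
                               struct example_priv *priv)
{
        netif_napi_add(dev, &priv->napi, example_poll, NAPI_POLL_WEIGHT);
}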
4493
4494 void netif_napi_del(struct napi_struct *napi)
4495 {
4496         list_del_init(&napi->dev_list);
4497         napi_free_frags(napi);
4498
4499         kfree_skb_list(napi->gro_list);
4500         napi->gro_list = NULL;
4501         napi->gro_count = 0;
4502 }
4503 EXPORT_SYMBOL(netif_napi_del);
4504
4505 static void net_rx_action(struct softirq_action *h)
4506 {
4507         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4508         unsigned long time_limit = jiffies + 2;
4509         int budget = netdev_budget;
4510         void *have;
4511
4512         local_irq_disable();
4513
4514         while (!list_empty(&sd->poll_list)) {
4515                 struct napi_struct *n;
4516                 int work, weight;
4517
4518                 /* If the softirq window is exhausted then punt.
4519                  * Allow this to run for 2 jiffies, which gives
4520                  * an average latency of 1.5/HZ.
4521                  */
4522                 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4523                         goto softnet_break;
4524
4525                 local_irq_enable();
4526
4527                 /* Even though interrupts have been re-enabled, this
4528                  * access is safe because interrupts can only add new
4529                  * entries to the tail of this list, and only ->poll()
4530                  * calls can remove this head entry from the list.
4531                  */
4532                 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
4533
4534                 have = netpoll_poll_lock(n);
4535
4536                 weight = n->weight;
4537
4538                 /* This NAPI_STATE_SCHED test is for avoiding a race
4539                  * with netpoll's poll_napi().  Only the entity which
4540                  * obtains the lock and sees NAPI_STATE_SCHED set will
4541                  * actually make the ->poll() call.  Therefore we avoid
4542                  * accidentally calling ->poll() when NAPI is not scheduled.
4543                  */
4544                 work = 0;
4545                 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4546                         work = n->poll(n, weight);
4547                         trace_napi_poll(n);
4548                 }
4549
4550                 WARN_ON_ONCE(work > weight);
4551
4552                 budget -= work;
4553
4554                 local_irq_disable();
4555
4556                 /* Drivers must not modify the NAPI state if they
4557                  * consume the entire weight.  In such cases this code
4558                  * still "owns" the NAPI instance and therefore can
4559                  * move the instance around on the list at-will.
4560                  */
4561                 if (unlikely(work == weight)) {
4562                         if (unlikely(napi_disable_pending(n))) {
4563                                 local_irq_enable();
4564                                 napi_complete(n);
4565                                 local_irq_disable();
4566                         } else {
4567                                 if (n->gro_list) {
4568                                         /* flush too old packets
4569                                          * If HZ < 1000, flush all packets.
4570                                          */
4571                                         local_irq_enable();
4572                                         napi_gro_flush(n, HZ >= 1000);
4573                                         local_irq_disable();
4574                                 }
4575                                 list_move_tail(&n->poll_list, &sd->poll_list);
4576                         }
4577                 }
4578
4579                 netpoll_poll_unlock(have);
4580         }
4581 out:
4582         net_rps_action_and_irq_enable(sd);
4583
4584         return;
4585
4586 softnet_break:
4587         sd->time_squeeze++;
4588         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4589         goto out;
4590 }
4591
4592 struct netdev_adjacent {
4593         struct net_device *dev;
4594
4595         /* upper master flag, there can only be one master device per list */
4596         bool master;
4597
4598         /* counter for the number of times this device was added to us */
4599         u16 ref_nr;
4600
4601         /* private field for the users */
4602         void *private;
4603
4604         struct list_head list;
4605         struct rcu_head rcu;
4606 };
4607
4608 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4609                                                  struct net_device *adj_dev,
4610                                                  struct list_head *adj_list)
4611 {
4612         struct netdev_adjacent *adj;
4613
4614         list_for_each_entry(adj, adj_list, list) {
4615                 if (adj->dev == adj_dev)
4616                         return adj;
4617         }
4618         return NULL;
4619 }
4620
4621 /**
4622  * netdev_has_upper_dev - Check if device is linked to an upper device
4623  * @dev: device
4624  * @upper_dev: upper device to check
4625  *
4626  * Find out if a device is linked to the specified upper device and return
4627  * true if it is. Note that this checks only the immediate upper device, not
4628  * the complete stack of devices. The caller must hold the RTNL lock.
4629  */
4630 bool netdev_has_upper_dev(struct net_device *dev,
4631                           struct net_device *upper_dev)
4632 {
4633         ASSERT_RTNL();
4634
4635         return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
4636 }
4637 EXPORT_SYMBOL(netdev_has_upper_dev);
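
/* Illustrative usage sketch (hypothetical helper, not part of the API above):
 * a stacking driver checking, under RTNL, whether 'upper' is already linked
 * above 'port' before attempting to link the two devices again.
 */
static bool __maybe_unused example_already_linked(struct net_device *port,
						  struct net_device *upper)
{
	ASSERT_RTNL();

	return netdev_has_upper_dev(port, upper);
}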
4638
4639 /**
4640  * netdev_has_any_upper_dev - Check if device is linked to some device
4641  * @dev: device
4642  *
4643  * Find out if a device is linked to an upper device and return true if
4644  * it is. The caller must hold the RTNL lock.
4645  */
4646 static bool netdev_has_any_upper_dev(struct net_device *dev)
4647 {
4648         ASSERT_RTNL();
4649
4650         return !list_empty(&dev->all_adj_list.upper);
4651 }
4652
4653 /**
4654  * netdev_master_upper_dev_get - Get master upper device
4655  * @dev: device
4656  *
4657  * Find a master upper device and return a pointer to it, or NULL if there
4658  * is none. The caller must hold the RTNL lock.
4659  */
4660 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4661 {
4662         struct netdev_adjacent *upper;
4663
4664         ASSERT_RTNL();
4665
4666         if (list_empty(&dev->adj_list.upper))
4667                 return NULL;
4668
4669         upper = list_first_entry(&dev->adj_list.upper,
4670                                  struct netdev_adjacent, list);
4671         if (likely(upper->master))
4672                 return upper->dev;
4673         return NULL;
4674 }
4675 EXPORT_SYMBOL(netdev_master_upper_dev_get);
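
/* Illustrative usage sketch (hypothetical helper): report the master device,
 * if any, that a port is enslaved to.  Must run under the RTNL lock, as the
 * kerneldoc above requires.
 */
static void __maybe_unused example_report_master(struct net_device *port)
{
	struct net_device *master;

	ASSERT_RTNL();

	master = netdev_master_upper_dev_get(port);
	if (master)
		netdev_info(port, "enslaved to master %s\n", master->name);
	else
		netdev_info(port, "no master device\n");
}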
4676
4677 void *netdev_adjacent_get_private(struct list_head *adj_list)
4678 {
4679         struct netdev_adjacent *adj;
4680
4681         adj = list_entry(adj_list, struct netdev_adjacent, list);
4682
4683         return adj->private;
4684 }
4685 EXPORT_SYMBOL(netdev_adjacent_get_private);
4686
4687 /**
4688  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4689  * @dev: device
4690  * @iter: list_head ** of the current position
4691  *
4692  * Gets the next device from the dev's upper list, starting from iter
4693  * position. The caller must hold RCU read lock.
4694  */
4695 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4696                                                  struct list_head **iter)
4697 {
4698         struct netdev_adjacent *upper;
4699
4700         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4701
4702         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4703
4704         if (&upper->list == &dev->adj_list.upper)
4705                 return NULL;
4706
4707         *iter = &upper->list;
4708
4709         return upper->dev;
4710 }
4711 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
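
/* Illustrative usage sketch (hypothetical helper): walk the immediate upper
 * devices under rcu_read_lock().  The iterator is primed with the head of
 * dev->adj_list.upper, which is how netdev_upper_get_next_dev_rcu() above
 * expects to be advanced.
 */
static void __maybe_unused example_walk_uppers(struct net_device *dev)
{
	struct list_head *iter = &dev->adj_list.upper;
	struct net_device *upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		netdev_dbg(dev, "upper device: %s\n", upper->name);
	rcu_read_unlock();
}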
4712
4713 /**
4714  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4715  * @dev: device
4716  * @iter: list_head ** of the current position
4717  *
4718  * Gets the next device from the dev's upper list, starting from iter
4719  * position. The caller must hold RCU read lock.
4720  */
4721 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4722                                                      struct list_head **iter)
4723 {
4724         struct netdev_adjacent *upper;
4725
4726         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4727
4728         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4729
4730         if (&upper->list == &dev->all_adj_list.upper)
4731                 return NULL;
4732
4733         *iter = &upper->list;
4734
4735         return upper->dev;
4736 }
4737 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
4738
4739 /**
4740  * netdev_lower_get_next_private - Get the next ->private from the
4741  *                                 lower neighbour list
4742  * @dev: device
4743  * @iter: list_head ** of the current position
4744  *
4745  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4746  * list, starting from iter position. The caller must either hold the
4747  * RTNL lock or its own locking that guarantees that the neighbour lower
4748  * list will remain unchanged.
4749  */
4750 void *netdev_lower_get_next_private(struct net_device *dev,
4751                                     struct list_head **iter)
4752 {
4753         struct netdev_adjacent *lower;
4754
4755         lower = list_entry(*iter, struct netdev_adjacent, list);
4756
4757         if (&lower->list == &dev->adj_list.lower)
4758                 return NULL;
4759
4760         *iter = lower->list.next;
4761
4762         return lower->private;
4763 }
4764 EXPORT_SYMBOL(netdev_lower_get_next_private);
4765
4766 /**
4767  * netdev_lower_get_next_private_rcu - Get the next ->private from the
4768  *                                     lower neighbour list, RCU
4769  *                                     variant
4770  * @dev: device
4771  * @iter: list_head ** of the current position
4772  *
4773  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4774  * list, starting from iter position. The caller must hold RCU read lock.
4775  */
4776 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4777                                         struct list_head **iter)
4778 {
4779         struct netdev_adjacent *lower;
4780
4781         WARN_ON_ONCE(!rcu_read_lock_held());
4782
4783         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4784
4785         if (&lower->list == &dev->adj_list.lower)
4786                 return NULL;
4787
4788         *iter = &lower->list;
4789
4790         return lower->private;
4791 }
4792 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4793
4794 /**
4795  * netdev_lower_get_next - Get the next device from the lower neighbour
4796  *                         list
4797  * @dev: device
4798  * @iter: list_head ** of the current position
4799  *
4800  * Gets the next netdev_adjacent from the dev's lower neighbour
4801  * list, starting from iter position. The caller must hold the RTNL lock or
4802  * its own locking that guarantees that the neighbour lower
4803  * list will remain unchanged.
4804  */
4805 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4806 {
4807         struct netdev_adjacent *lower;
4808
4809         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4810
4811         if (&lower->list == &dev->adj_list.lower)
4812                 return NULL;
4813
4814         *iter = &lower->list;
4815
4816         return lower->dev;
4817 }
4818 EXPORT_SYMBOL(netdev_lower_get_next);
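
/* Illustrative usage sketch (hypothetical helper): count the immediate lower
 * devices using the netdev_for_each_lower_dev() iterator (the same helper
 * dev_get_nest_level() below uses), which steps through the list via
 * netdev_lower_get_next().  Requires the RTNL lock.
 */
static unsigned int __maybe_unused example_count_lowers(struct net_device *dev)
{
	struct net_device *lower;
	struct list_head *iter;
	unsigned int count = 0;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter)
		count++;

	return count;
}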
4819
4820 /**
4821  * netdev_lower_get_first_private_rcu - Get the first ->private from the
4822  *                                     lower neighbour list, RCU
4823  *                                     variant
4824  * @dev: device
4825  *
4826  * Gets the first netdev_adjacent->private from the dev's lower neighbour
4827  * list. The caller must hold RCU read lock.
4828  */
4829 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4830 {
4831         struct netdev_adjacent *lower;
4832
4833         lower = list_first_or_null_rcu(&dev->adj_list.lower,
4834                         struct netdev_adjacent, list);
4835         if (lower)
4836                 return lower->private;
4837         return NULL;
4838 }
4839 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4840
4841 /**
4842  * netdev_master_upper_dev_get_rcu - Get master upper device
4843  * @dev: device
4844  *
4845  * Find a master upper device and return a pointer to it, or NULL if there
4846  * is none. The caller must hold the RCU read lock.
4847  */
4848 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4849 {
4850         struct netdev_adjacent *upper;
4851
4852         upper = list_first_or_null_rcu(&dev->adj_list.upper,
4853                                        struct netdev_adjacent, list);
4854         if (upper && likely(upper->master))
4855                 return upper->dev;
4856         return NULL;
4857 }
4858 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4859
4860 static int netdev_adjacent_sysfs_add(struct net_device *dev,
4861                               struct net_device *adj_dev,
4862                               struct list_head *dev_list)
4863 {
4864         char linkname[IFNAMSIZ+7];
4865         sprintf(linkname, dev_list == &dev->adj_list.upper ?
4866                 "upper_%s" : "lower_%s", adj_dev->name);
4867         return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4868                                  linkname);
4869 }
4870 static void netdev_adjacent_sysfs_del(struct net_device *dev,
4871                                char *name,
4872                                struct list_head *dev_list)
4873 {
4874         char linkname[IFNAMSIZ+7];
4875         sprintf(linkname, dev_list == &dev->adj_list.upper ?
4876                 "upper_%s" : "lower_%s", name);
4877         sysfs_remove_link(&(dev->dev.kobj), linkname);
4878 }
4879
4880 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
4881                                                  struct net_device *adj_dev,
4882                                                  struct list_head *dev_list)
4883 {
4884         return (dev_list == &dev->adj_list.upper ||
4885                 dev_list == &dev->adj_list.lower) &&
4886                 net_eq(dev_net(dev), dev_net(adj_dev));
4887 }
4888
4889 static int __netdev_adjacent_dev_insert(struct net_device *dev,
4890                                         struct net_device *adj_dev,
4891                                         struct list_head *dev_list,
4892                                         void *private, bool master)
4893 {
4894         struct netdev_adjacent *adj;
4895         int ret;
4896
4897         adj = __netdev_find_adj(dev, adj_dev, dev_list);
4898
4899         if (adj) {
4900                 adj->ref_nr++;
4901                 return 0;
4902         }
4903
4904         adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4905         if (!adj)
4906                 return -ENOMEM;
4907
4908         adj->dev = adj_dev;
4909         adj->master = master;
4910         adj->ref_nr = 1;
4911         adj->private = private;
4912         dev_hold(adj_dev);
4913
4914         pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4915                  adj_dev->name, dev->name, adj_dev->name);
4916
4917         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
4918                 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
4919                 if (ret)
4920                         goto free_adj;
4921         }
4922
4923         /* Ensure that master link is always the first item in list. */
4924         if (master) {
4925                 ret = sysfs_create_link(&(dev->dev.kobj),
4926                                         &(adj_dev->dev.kobj), "master");
4927                 if (ret)
4928                         goto remove_symlinks;
4929
4930                 list_add_rcu(&adj->list, dev_list);
4931         } else {
4932                 list_add_tail_rcu(&adj->list, dev_list);
4933         }
4934
4935         return 0;
4936
4937 remove_symlinks:
4938         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
4939                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
4940 free_adj:
4941         kfree(adj);
4942         dev_put(adj_dev);
4943
4944         return ret;
4945 }
4946
4947 static void __netdev_adjacent_dev_remove(struct net_device *dev,
4948                                          struct net_device *adj_dev,
4949                                          struct list_head *dev_list)
4950 {
4951         struct netdev_adjacent *adj;
4952
4953         adj = __netdev_find_adj(dev, adj_dev, dev_list);
4954
4955         if (!adj) {
4956                 pr_err("tried to remove device %s from %s\n",
4957                        dev->name, adj_dev->name);
4958                 BUG();
4959         }
4960
4961         if (adj->ref_nr > 1) {
4962                 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
4963                          adj->ref_nr-1);
4964                 adj->ref_nr--;
4965                 return;
4966         }
4967
4968         if (adj->master)
4969                 sysfs_remove_link(&(dev->dev.kobj), "master");
4970
4971         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
4972                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
4973
4974         list_del_rcu(&adj->list);
4975         pr_debug("dev_put for %s, because link removed from %s to %s\n",
4976                  adj_dev->name, dev->name, adj_dev->name);
4977         dev_put(adj_dev);
4978         kfree_rcu(adj, rcu);
4979 }
4980
4981 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
4982                                             struct net_device *upper_dev,
4983                                             struct list_head *up_list,
4984                                             struct list_head *down_list,
4985                                             void *private, bool master)
4986 {
4987         int ret;
4988
4989         ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
4990                                            master);
4991         if (ret)
4992                 return ret;
4993
4994         ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
4995                                            false);
4996         if (ret) {
4997                 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4998                 return ret;
4999         }
5000
5001         return 0;
5002 }
5003
5004 static int __netdev_adjacent_dev_link(struct net_device *dev,
5005                                       struct net_device *upper_dev)
5006 {
5007         return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5008                                                 &dev->all_adj_list.upper,
5009                                                 &upper_dev->all_adj_list.lower,
5010                                                 NULL, false);
5011 }
5012
5013 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5014                                                struct net_device *upper_dev,
5015                                                struct list_head *up_list,
5016                                                struct list_head *down_list)
5017 {
5018         __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5019         __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5020 }
5021
5022 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5023                                          struct net_device *upper_dev)
5024 {
5025         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5026                                            &dev->all_adj_list.upper,
5027                                            &upper_dev->all_adj_list.lower);
5028 }
5029
5030 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5031                                                 struct net_device *upper_dev,
5032                                                 void *private, bool master)
5033 {
5034         int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5035
5036         if (ret)
5037                 return ret;
5038
5039         ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5040                                                &dev->adj_list.upper,
5041                                                &upper_dev->adj_list.lower,
5042                                                private, master);
5043         if (ret) {
5044                 __netdev_adjacent_dev_unlink(dev, upper_dev);
5045                 return ret;
5046         }
5047
5048         return 0;
5049 }
5050
5051 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5052                                                    struct net_device *upper_dev)
5053 {
5054         __netdev_adjacent_dev_unlink(dev, upper_dev);
5055         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5056                                            &dev->adj_list.upper,
5057                                            &upper_dev->adj_list.lower);
5058 }
5059
5060 static int __netdev_upper_dev_link(struct net_device *dev,
5061                                    struct net_device *upper_dev, bool master,
5062                                    void *private)
5063 {
5064         struct netdev_adjacent *i, *j, *to_i, *to_j;
5065         int ret = 0;
5066
5067         ASSERT_RTNL();
5068
5069         if (dev == upper_dev)
5070                 return -EBUSY;
5071
5072         /* To prevent loops, check that dev is not an upper device of upper_dev. */
5073         if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
5074                 return -EBUSY;
5075
5076         if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
5077                 return -EEXIST;
5078
5079         if (master && netdev_master_upper_dev_get(dev))
5080                 return -EBUSY;
5081
5082         ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5083                                                    master);
5084         if (ret)
5085                 return ret;
5086
5087         /* Now that we linked these devs, make all the upper_dev's
5088          * all_adj_list.upper visible to every dev's all_adj_list.lower and
5089          * vice versa, and don't forget the devices themselves. All of these
5090          * links are non-neighbours.
5091          */
5092         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5093                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5094                         pr_debug("Interlinking %s with %s, non-neighbour\n",
5095                                  i->dev->name, j->dev->name);
5096                         ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5097                         if (ret)
5098                                 goto rollback_mesh;
5099                 }
5100         }
5101
5102         /* add dev to every upper_dev's upper device */
5103         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5104                 pr_debug("linking %s's upper device %s with %s\n",
5105                          upper_dev->name, i->dev->name, dev->name);
5106                 ret = __netdev_adjacent_dev_link(dev, i->dev);
5107                 if (ret)
5108                         goto rollback_upper_mesh;
5109         }
5110
5111         /* add upper_dev to every dev's lower device */
5112         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5113                 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5114                          i->dev->name, upper_dev->name);
5115                 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5116                 if (ret)
5117                         goto rollback_lower_mesh;
5118         }
5119
5120         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5121         return 0;
5122
5123 rollback_lower_mesh:
5124         to_i = i;
5125         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5126                 if (i == to_i)
5127                         break;
5128                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5129         }
5130
5131         i = NULL;
5132
5133 rollback_upper_mesh:
5134         to_i = i;
5135         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5136                 if (i == to_i)
5137                         break;
5138                 __netdev_adjacent_dev_unlink(dev, i->dev);
5139         }
5140
5141         i = j = NULL;
5142
5143 rollback_mesh:
5144         to_i = i;
5145         to_j = j;
5146         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5147                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5148                         if (i == to_i && j == to_j)
5149                                 break;
5150                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5151                 }
5152                 if (i == to_i)
5153                         break;
5154         }
5155
5156         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5157
5158         return ret;
5159 }
5160
5161 /**
5162  * netdev_upper_dev_link - Add a link to the upper device
5163  * @dev: device
5164  * @upper_dev: new upper device
5165  *
5166  * Adds a link to a device which is upper to this one. The caller must hold
5167  * the RTNL lock. On a failure a negative errno code is returned.
5168  * On success the reference counts are adjusted and the function
5169  * returns zero.
5170  */
5171 int netdev_upper_dev_link(struct net_device *dev,
5172                           struct net_device *upper_dev)
5173 {
5174         return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
5175 }
5176 EXPORT_SYMBOL(netdev_upper_dev_link);
5177
5178 /**
5179  * netdev_master_upper_dev_link - Add a master link to the upper device
5180  * @dev: device
5181  * @upper_dev: new upper device
5182  *
5183  * Adds a link to a device which is upper to this one. In this case, only
5184  * one master upper device can be linked, although other non-master devices
5185  * might be linked as well. The caller must hold the RTNL lock.
5186  * On a failure a negative errno code is returned. On success the reference
5187  * counts are adjusted and the function returns zero.
5188  */
5189 int netdev_master_upper_dev_link(struct net_device *dev,
5190                                  struct net_device *upper_dev)
5191 {
5192         return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
5193 }
5194 EXPORT_SYMBOL(netdev_master_upper_dev_link);
5195
5196 int netdev_master_upper_dev_link_private(struct net_device *dev,
5197                                          struct net_device *upper_dev,
5198                                          void *private)
5199 {
5200         return __netdev_upper_dev_link(dev, upper_dev, true, private);
5201 }
5202 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5203
5204 /**
5205  * netdev_upper_dev_unlink - Removes a link to upper device
5206  * @dev: device
5207  * @upper_dev: upper device to remove
5208  *
5209  * Removes a link to a device which is upper to this one. The caller must hold
5210  * the RTNL lock.
5211  */
5212 void netdev_upper_dev_unlink(struct net_device *dev,
5213                              struct net_device *upper_dev)
5214 {
5215         struct netdev_adjacent *i, *j;
5216         ASSERT_RTNL();
5217
5218         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5219
5220         /* Here is the tricky part. We must remove all dev's lower
5221          * devices from all upper_dev's upper devices and vice
5222          * versa, to maintain the graph relationship.
5223          */
5224         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5225                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5226                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5227
5228         /* also remove the devices themselves from the lower/upper
5229          * device lists
5230          */
5231         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5232                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5233
5234         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5235                 __netdev_adjacent_dev_unlink(dev, i->dev);
5236
5237         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5238 }
5239 EXPORT_SYMBOL(netdev_upper_dev_unlink);
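
/* Illustrative usage sketch (hypothetical enslave/release helpers): how a
 * bonding/team-style master driver might link a port below itself and later
 * unlink it.  Both calls must be made with the RTNL lock held.
 */
static int __maybe_unused example_enslave_port(struct net_device *master,
					       struct net_device *port)
{
	ASSERT_RTNL();

	/* port becomes a lower device of master, its single master upper */
	return netdev_master_upper_dev_link(port, master);
}

static void __maybe_unused example_release_port(struct net_device *master,
						struct net_device *port)
{
	ASSERT_RTNL();

	netdev_upper_dev_unlink(port, master);
}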
5240
5241 void netdev_adjacent_add_links(struct net_device *dev)
5242 {
5243         struct netdev_adjacent *iter;
5244
5245         struct net *net = dev_net(dev);
5246
5247         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5248                 if (!net_eq(net, dev_net(iter->dev)))
5249                         continue;
5250                 netdev_adjacent_sysfs_add(iter->dev, dev,
5251                                           &iter->dev->adj_list.lower);
5252                 netdev_adjacent_sysfs_add(dev, iter->dev,
5253                                           &dev->adj_list.upper);
5254         }
5255
5256         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5257                 if (!net_eq(net, dev_net(iter->dev)))
5258                         continue;
5259                 netdev_adjacent_sysfs_add(iter->dev, dev,
5260                                           &iter->dev->adj_list.upper);
5261                 netdev_adjacent_sysfs_add(dev, iter->dev,
5262                                           &dev->adj_list.lower);
5263         }
5264 }
5265
5266 void netdev_adjacent_del_links(struct net_device *dev)
5267 {
5268         struct netdev_adjacent *iter;
5269
5270         struct net *net = dev_net(dev);
5271
5272         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5273                 if (!net_eq(net, dev_net(iter->dev)))
5274                         continue;
5275                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5276                                           &iter->dev->adj_list.lower);
5277                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5278                                           &dev->adj_list.upper);
5279         }
5280
5281         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5282                 if (!net_eq(net, dev_net(iter->dev)))
5283                         continue;
5284                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5285                                           &iter->dev->adj_list.upper);
5286                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5287                                           &dev->adj_list.lower);
5288         }
5289 }
5290
5291 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5292 {
5293         struct netdev_adjacent *iter;
5294
5295         struct net *net = dev_net(dev);
5296
5297         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5298                 if (!net_eq(net, dev_net(iter->dev)))
5299                         continue;
5300                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5301                                           &iter->dev->adj_list.lower);
5302                 netdev_adjacent_sysfs_add(iter->dev, dev,
5303                                           &iter->dev->adj_list.lower);
5304         }
5305
5306         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5307                 if (!net_eq(net, dev_net(iter->dev)))
5308                         continue;
5309                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5310                                           &iter->dev->adj_list.upper);
5311                 netdev_adjacent_sysfs_add(iter->dev, dev,
5312                                           &iter->dev->adj_list.upper);
5313         }
5314 }
5315
5316 void *netdev_lower_dev_get_private(struct net_device *dev,
5317                                    struct net_device *lower_dev)
5318 {
5319         struct netdev_adjacent *lower;
5320
5321         if (!lower_dev)
5322                 return NULL;
5323         lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5324         if (!lower)
5325                 return NULL;
5326
5327         return lower->private;
5328 }
5329 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5330
5331
5332 int dev_get_nest_level(struct net_device *dev,
5333                        bool (*type_check)(struct net_device *dev))
5334 {
5335         struct net_device *lower = NULL;
5336         struct list_head *iter;
5337         int max_nest = -1;
5338         int nest;
5339
5340         ASSERT_RTNL();
5341
5342         netdev_for_each_lower_dev(dev, lower, iter) {
5343                 nest = dev_get_nest_level(lower, type_check);
5344                 if (max_nest < nest)
5345                         max_nest = nest;
5346         }
5347
5348         if (type_check(dev))
5349                 max_nest++;
5350
5351         return max_nest;
5352 }
5353 EXPORT_SYMBOL(dev_get_nest_level);
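
/* Illustrative usage sketch (hypothetical predicate and helper): a stacked
 * device type would typically use the nest level as a lockdep subclass.
 * Here the predicate counts 802.1q VLAN devices in the lower stack.
 */
static bool __maybe_unused example_is_vlan(struct net_device *dev)
{
	return !!(dev->priv_flags & IFF_802_1Q_VLAN);
}

static int __maybe_unused example_vlan_nest_level(struct net_device *dev)
{
	int level;

	rtnl_lock();
	level = dev_get_nest_level(dev, example_is_vlan);
	rtnl_unlock();

	return level;
}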
5354
5355 static void dev_change_rx_flags(struct net_device *dev, int flags)
5356 {
5357         const struct net_device_ops *ops = dev->netdev_ops;
5358
5359         if (ops->ndo_change_rx_flags)
5360                 ops->ndo_change_rx_flags(dev, flags);
5361 }
5362
5363 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5364 {
5365         unsigned int old_flags = dev->flags;
5366         kuid_t uid;
5367         kgid_t gid;
5368
5369         ASSERT_RTNL();
5370
5371         dev->flags |= IFF_PROMISC;
5372         dev->promiscuity += inc;
5373         if (dev->promiscuity == 0) {
5374                 /*
5375                  * Avoid overflow.
5376                  * If inc causes overflow, leave promiscuity untouched and return an error.
5377                  */
5378                 if (inc < 0)
5379                         dev->flags &= ~IFF_PROMISC;
5380                 else {
5381                         dev->promiscuity -= inc;
5382                         pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5383                                 dev->name);
5384                         return -EOVERFLOW;
5385                 }
5386         }
5387         if (dev->flags != old_flags) {
5388                 pr_info("device %s %s promiscuous mode\n",
5389                         dev->name,
5390                         dev->flags & IFF_PROMISC ? "entered" : "left");
5391                 if (audit_enabled) {
5392                         current_uid_gid(&uid, &gid);
5393                         audit_log(current->audit_context, GFP_ATOMIC,
5394                                 AUDIT_ANOM_PROMISCUOUS,
5395                                 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5396                                 dev->name, (dev->flags & IFF_PROMISC),
5397                                 (old_flags & IFF_PROMISC),
5398                                 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5399                                 from_kuid(&init_user_ns, uid),
5400                                 from_kgid(&init_user_ns, gid),
5401                                 audit_get_sessionid(current));
5402                 }
5403
5404                 dev_change_rx_flags(dev, IFF_PROMISC);
5405         }
5406         if (notify)
5407                 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5408         return 0;
5409 }
5410
5411 /**
5412  *      dev_set_promiscuity     - update promiscuity count on a device
5413  *      @dev: device
5414  *      @inc: modifier
5415  *
5416  *      Add or remove promiscuity from a device. While the count in the device
5417  *      remains above zero the interface remains promiscuous. Once it hits zero
5418  *      the device reverts back to normal filtering operation. A negative inc
5419  *      value is used to drop promiscuity on the device.
5420  *      Return 0 if successful or a negative errno code on error.
5421  */
5422 int dev_set_promiscuity(struct net_device *dev, int inc)
5423 {
5424         unsigned int old_flags = dev->flags;
5425         int err;
5426
5427         err = __dev_set_promiscuity(dev, inc, true);
5428         if (err < 0)
5429                 return err;
5430         if (dev->flags != old_flags)
5431                 dev_set_rx_mode(dev);
5432         return err;
5433 }
5434 EXPORT_SYMBOL(dev_set_promiscuity);
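
/* Illustrative usage sketch (hypothetical capture helpers): a packet-capture
 * style user takes one reference on promiscuous mode while capturing and
 * drops it again when done.  Both calls require the RTNL lock.
 */
static int __maybe_unused example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* paired with the -1 below */
	rtnl_unlock();

	return err;
}

static void __maybe_unused example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);
	rtnl_unlock();
}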
5435
5436 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5437 {
5438         unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5439
5440         ASSERT_RTNL();
5441
5442         dev->flags |= IFF_ALLMULTI;
5443         dev->allmulti += inc;
5444         if (dev->allmulti == 0) {
5445                 /*
5446                  * Avoid overflow.
5447                  * If inc causes overflow, leave allmulti untouched and return an error.
5448                  */
5449                 if (inc < 0)
5450                         dev->flags &= ~IFF_ALLMULTI;
5451                 else {
5452                         dev->allmulti -= inc;
5453                         pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5454                                 dev->name);
5455                         return -EOVERFLOW;
5456                 }
5457         }
5458         if (dev->flags ^ old_flags) {
5459                 dev_change_rx_flags(dev, IFF_ALLMULTI);
5460                 dev_set_rx_mode(dev);
5461                 if (notify)
5462                         __dev_notify_flags(dev, old_flags,
5463                                            dev->gflags ^ old_gflags);
5464         }
5465         return 0;
5466 }
5467
5468 /**
5469  *      dev_set_allmulti        - update allmulti count on a device
5470  *      @dev: device
5471  *      @inc: modifier
5472  *
5473  *      Add or remove reception of all multicast frames on a device. While the
5474  *      count in the device remains above zero the interface remains listening
5475  *      to all multicast frames. Once it hits zero the device reverts back to normal
5476  *      filtering operation. A negative @inc value is used to drop the counter
5477  *      when releasing a resource needing all multicasts.
5478  *      Return 0 if successful or a negative errno code on error.
5479  */
5480
5481 int dev_set_allmulti(struct net_device *dev, int inc)
5482 {
5483         return __dev_set_allmulti(dev, inc, true);
5484 }
5485 EXPORT_SYMBOL(dev_set_allmulti);
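
/* Illustrative usage sketch (hypothetical helper): a multicast routing
 * component taking or releasing an allmulti reference on a port device,
 * under the RTNL lock.
 */
static int __maybe_unused example_set_allmulti(struct net_device *dev, bool on)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, on ? 1 : -1);
	rtnl_unlock();

	return err;
}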
5486
5487 /*
5488  *      Upload unicast and multicast address lists to device and
5489  *      configure RX filtering. When the device doesn't support unicast
5490  *      filtering it is put in promiscuous mode while unicast addresses
5491  *      are present.
5492  */
5493 void __dev_set_rx_mode(struct net_device *dev)
5494 {
5495         const struct net_device_ops *ops = dev->netdev_ops;
5496
5497         /* dev_open will call this function so the list will stay sane. */
5498         if (!(dev->flags&IFF_UP))
5499                 return;
5500
5501         if (!netif_device_present(dev))
5502                 return;
5503
5504         if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
5505                 /* Unicast address changes may only happen under the rtnl,
5506                  * therefore calling __dev_set_promiscuity here is safe.
5507                  */
5508                 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5509                         __dev_set_promiscuity(dev, 1, false);
5510                         dev->uc_promisc = true;
5511                 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5512                         __dev_set_promiscuity(dev, -1, false);
5513                         dev->uc_promisc = false;
5514                 }
5515         }
5516
5517         if (ops->ndo_set_rx_mode)
5518                 ops->ndo_set_rx_mode(dev);
5519 }
5520
5521 void dev_set_rx_mode(struct net_device *dev)
5522 {
5523         netif_addr_lock_bh(dev);
5524         __dev_set_rx_mode(dev);
5525         netif_addr_unlock_bh(dev);
5526 }
5527
5528 /**
5529  *      dev_get_flags - get flags reported to userspace
5530  *      @dev: device
5531  *
5532  *      Get the combination of flag bits exported through APIs to userspace.
5533  */
5534 unsigned int dev_get_flags(const struct net_device *dev)
5535 {
5536         unsigned int flags;
5537
5538         flags = (dev->flags & ~(IFF_PROMISC |
5539                                 IFF_ALLMULTI |
5540                                 IFF_RUNNING |
5541                                 IFF_LOWER_UP |
5542                                 IFF_DORMANT)) |
5543                 (dev->gflags & (IFF_PROMISC |
5544                                 IFF_ALLMULTI));
5545
5546         if (netif_running(dev)) {
5547                 if (netif_oper_up(dev))
5548                         flags |= IFF_RUNNING;
5549                 if (netif_carrier_ok(dev))
5550                         flags |= IFF_LOWER_UP;
5551                 if (netif_dormant(dev))
5552                         flags |= IFF_DORMANT;
5553         }
5554
5555         return flags;
5556 }
5557 EXPORT_SYMBOL(dev_get_flags);
5558
5559 int __dev_change_flags(struct net_device *dev, unsigned int flags)
5560 {
5561         unsigned int old_flags = dev->flags;
5562         int ret;
5563
5564         ASSERT_RTNL();
5565
5566         /*
5567          *      Set the flags on our device.
5568          */
5569
5570         dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5571                                IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5572                                IFF_AUTOMEDIA)) |
5573                      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5574                                     IFF_ALLMULTI));
5575
5576         /*
5577          *      Load in the correct multicast list now the flags have changed.
5578          */
5579
5580         if ((old_flags ^ flags) & IFF_MULTICAST)
5581                 dev_change_rx_flags(dev, IFF_MULTICAST);
5582
5583         dev_set_rx_mode(dev);
5584
5585         /*
5586          *      Have we downed the interface? We handle IFF_UP ourselves
5587          *      according to user attempts to set it, rather than blindly
5588          *      setting it.
5589          */
5590
5591         ret = 0;
5592         if ((old_flags ^ flags) & IFF_UP)
5593                 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
5594
5595         if ((flags ^ dev->gflags) & IFF_PROMISC) {
5596                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5597                 unsigned int old_flags = dev->flags;
5598
5599                 dev->gflags ^= IFF_PROMISC;
5600
5601                 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5602                         if (dev->flags != old_flags)
5603                                 dev_set_rx_mode(dev);
5604         }
5605
5606         /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5607            is important. Some (broken) drivers set IFF_PROMISC when
5608            IFF_ALLMULTI is requested, without asking us and without reporting it.
5609          */
5610         if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
5611                 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5612
5613                 dev->gflags ^= IFF_ALLMULTI;
5614                 __dev_set_allmulti(dev, inc, false);
5615         }
5616
5617         return ret;
5618 }
5619
5620 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5621                         unsigned int gchanges)
5622 {
5623         unsigned int changes = dev->flags ^ old_flags;
5624
5625         if (gchanges)
5626                 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
5627
5628         if (changes & IFF_UP) {
5629                 if (dev->flags & IFF_UP)
5630                         call_netdevice_notifiers(NETDEV_UP, dev);
5631                 else
5632                         call_netdevice_notifiers(NETDEV_DOWN, dev);
5633         }
5634
5635         if (dev->flags & IFF_UP &&
5636             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5637                 struct netdev_notifier_change_info change_info;
5638
5639                 change_info.flags_changed = changes;
5640                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5641                                               &change_info.info);
5642         }
5643 }
5644
5645 /**
5646  *      dev_change_flags - change device settings
5647  *      @dev: device
5648  *      @flags: device state flags
5649  *
5650  *      Change settings on the device based on the state flags. The flags
5651  *      are in the userspace-exported format.
5652  */
5653 int dev_change_flags(struct net_device *dev, unsigned int flags)
5654 {
5655         int ret;
5656         unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
5657
5658         ret = __dev_change_flags(dev, flags);
5659         if (ret < 0)
5660                 return ret;
5661
5662         changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
5663         __dev_notify_flags(dev, old_flags, changes);
5664         return ret;
5665 }
5666 EXPORT_SYMBOL(dev_change_flags);
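
/* Illustrative usage sketch (hypothetical helper): bring an interface
 * administratively up from kernel code, the same way an SIOCSIFFLAGS ioctl
 * would, by setting IFF_UP in the userspace-visible flags.
 */
static int __maybe_unused example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();

	return err;
}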
5667
5668 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5669 {
5670         const struct net_device_ops *ops = dev->netdev_ops;
5671
5672         if (ops->ndo_change_mtu)
5673                 return ops->ndo_change_mtu(dev, new_mtu);
5674
5675         dev->mtu = new_mtu;
5676         return 0;
5677 }
5678
5679 /**
5680  *      dev_set_mtu - Change maximum transfer unit
5681  *      @dev: device
5682  *      @new_mtu: new transfer unit
5683  *
5684  *      Change the maximum transfer size of the network device.
5685  */
5686 int dev_set_mtu(struct net_device *dev, int new_mtu)
5687 {
5688         int err, orig_mtu;
5689
5690         if (new_mtu == dev->mtu)
5691                 return 0;
5692
5693         /*      MTU must not be negative.    */
5694         if (new_mtu < 0)
5695                 return -EINVAL;
5696
5697         if (!netif_device_present(dev))
5698                 return -ENODEV;
5699
5700         err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5701         err = notifier_to_errno(err);
5702         if (err)
5703                 return err;
5704
5705         orig_mtu = dev->mtu;
5706         err = __dev_set_mtu(dev, new_mtu);
5707
5708         if (!err) {
5709                 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5710                 err = notifier_to_errno(err);
5711                 if (err) {
5712                         /* setting mtu back and notifying everyone again,
5713                          * so that they have a chance to revert changes.
5714                          */
5715                         __dev_set_mtu(dev, orig_mtu);
5716                         call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5717                 }
5718         }
5719         return err;
5720 }
5721 EXPORT_SYMBOL(dev_set_mtu);
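
/* Illustrative usage sketch (hypothetical helper and overhead value): grow
 * the MTU of a lower device so that an encapsulating upper device can still
 * carry full-sized Ethernet payloads.
 */
static int __maybe_unused example_grow_mtu(struct net_device *lower)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(lower, ETH_DATA_LEN + 50);	/* assume 50 bytes of encap overhead */
	rtnl_unlock();

	return err;
}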
5722
5723 /**
5724  *      dev_set_group - Change group this device belongs to
5725  *      @dev: device
5726  *      @new_group: group this device should belong to
5727  */
5728 void dev_set_group(struct net_device *dev, int new_group)
5729 {
5730         dev->group = new_group;
5731 }
5732 EXPORT_SYMBOL(dev_set_group);
5733
5734 /**
5735  *      dev_set_mac_address - Change Media Access Control Address
5736  *      @dev: device
5737  *      @sa: new address
5738  *
5739  *      Change the hardware (MAC) address of the device
5740  */
5741 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5742 {
5743         const struct net_device_ops *ops = dev->netdev_ops;
5744         int err;
5745
5746         if (!ops->ndo_set_mac_address)
5747                 return -EOPNOTSUPP;
5748         if (sa->sa_family != dev->type)
5749                 return -EINVAL;
5750         if (!netif_device_present(dev))
5751                 return -ENODEV;
5752         err = ops->ndo_set_mac_address(dev, sa);
5753         if (err)
5754                 return err;
5755         dev->addr_assign_type = NET_ADDR_SET;
5756         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5757         add_device_randomness(dev->dev_addr, dev->addr_len);
5758         return 0;
5759 }
5760 EXPORT_SYMBOL(dev_set_mac_address);
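
/* Illustrative usage sketch (hypothetical helper): set a new hardware address
 * from a raw ETH_ALEN-byte buffer.  The sockaddr family must match dev->type,
 * as dev_set_mac_address() above checks.
 */
static int __maybe_unused example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, ETH_ALEN);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();

	return err;
}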
5761
5762 /**
5763  *      dev_change_carrier - Change device carrier
5764  *      @dev: device
5765  *      @new_carrier: new value
5766  *
5767  *      Change device carrier
5768  */
5769 int dev_change_carrier(struct net_device *dev, bool new_carrier)
5770 {
5771         const struct net_device_ops *ops = dev->netdev_ops;
5772
5773         if (!ops->ndo_change_carrier)
5774                 return -EOPNOTSUPP;
5775         if (!netif_device_present(dev))
5776                 return -ENODEV;
5777         return ops->ndo_change_carrier(dev, new_carrier);
5778 }
5779 EXPORT_SYMBOL(dev_change_carrier);
5780
5781 /**
5782  *      dev_get_phys_port_id - Get device physical port ID
5783  *      @dev: device
5784  *      @ppid: port ID
5785  *
5786  *      Get device physical port ID
5787  */
5788 int dev_get_phys_port_id(struct net_device *dev,
5789                          struct netdev_phys_port_id *ppid)
5790 {
5791         const struct net_device_ops *ops = dev->netdev_ops;
5792
5793         if (!ops->ndo_get_phys_port_id)
5794                 return -EOPNOTSUPP;
5795         return ops->ndo_get_phys_port_id(dev, ppid);
5796 }
5797 EXPORT_SYMBOL(dev_get_phys_port_id);
5798
5799 /**
5800  *      dev_new_index   -       allocate an ifindex
5801  *      @net: the applicable net namespace
5802  *
5803  *      Returns a suitable unique value for a new device interface
5804  *      number.  The caller must hold the rtnl semaphore or the
5805  *      dev_base_lock to be sure it remains unique.
5806  */
5807 static int dev_new_index(struct net *net)
5808 {
5809         int ifindex = net->ifindex;
5810         for (;;) {
5811                 if (++ifindex <= 0)
5812                         ifindex = 1;
5813                 if (!__dev_get_by_index(net, ifindex))
5814                         return net->ifindex = ifindex;
5815         }
5816 }
5817
5818 /* Delayed registration/unregistration */
5819 static LIST_HEAD(net_todo_list);
5820 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5821
5822 static void net_set_todo(struct net_device *dev)
5823 {
5824         list_add_tail(&dev->todo_list, &net_todo_list);
5825         dev_net(dev)->dev_unreg_count++;
5826 }
5827
5828 static void rollback_registered_many(struct list_head *head)
5829 {
5830         struct net_device *dev, *tmp;
5831         LIST_HEAD(close_head);
5832
5833         BUG_ON(dev_boot_phase);
5834         ASSERT_RTNL();
5835
5836         list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5837                 /* Some devices call this without having registered,
5838                  * to unwind a failed initialization. Remove those
5839                  * devices and proceed with the remaining ones.
5840                  */
5841                 if (dev->reg_state == NETREG_UNINITIALIZED) {
5842                         pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5843                                  dev->name, dev);
5844
5845                         WARN_ON(1);
5846                         list_del(&dev->unreg_list);
5847                         continue;
5848                 }
5849                 dev->dismantle = true;
5850                 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5851         }
5852
5853         /* If device is running, close it first. */
5854         list_for_each_entry(dev, head, unreg_list)
5855                 list_add_tail(&dev->close_list, &close_head);
5856         dev_close_many(&close_head);
5857
5858         list_for_each_entry(dev, head, unreg_list) {
5859                 /* And unlink it from device chain. */
5860                 unlist_netdevice(dev);
5861
5862                 dev->reg_state = NETREG_UNREGISTERING;
5863         }
5864
5865         synchronize_net();
5866
5867         list_for_each_entry(dev, head, unreg_list) {
5868                 /* Shutdown queueing discipline. */
5869                 dev_shutdown(dev);
5870
5871
5872                 /* Notify protocols that we are about to destroy
5873                  * this device. They should clean up all of their state.
5874                  */
5875                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5876
5877                 /*
5878                  *      Flush the unicast and multicast chains
5879                  */
5880                 dev_uc_flush(dev);
5881                 dev_mc_flush(dev);
5882
5883                 if (dev->netdev_ops->ndo_uninit)
5884                         dev->netdev_ops->ndo_uninit(dev);
5885
5886                 if (!dev->rtnl_link_ops ||
5887                     dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5888                         rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
5889
5890                 /* The notifier chain MUST have detached all of our upper devices. */
5891                 WARN_ON(netdev_has_any_upper_dev(dev));
5892
5893                 /* Remove entries from kobject tree */
5894                 netdev_unregister_kobject(dev);
5895 #ifdef CONFIG_XPS
5896                 /* Remove XPS queueing entries */
5897                 netif_reset_xps_queues_gt(dev, 0);
5898 #endif
5899         }
5900
5901         synchronize_net();
5902
5903         list_for_each_entry(dev, head, unreg_list)
5904                 dev_put(dev);
5905 }
5906
5907 static void rollback_registered(struct net_device *dev)
5908 {
5909         LIST_HEAD(single);
5910
5911         list_add(&dev->unreg_list, &single);
5912         rollback_registered_many(&single);
5913         list_del(&single);
5914 }
5915
5916 static netdev_features_t netdev_fix_features(struct net_device *dev,
5917         netdev_features_t features)
5918 {
5919         /* Fix illegal checksum combinations */
5920         if ((features & NETIF_F_HW_CSUM) &&
5921             (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5922                 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5923                 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5924         }
5925
5926         /* TSO requires that SG is present as well. */
5927         if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5928                 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5929                 features &= ~NETIF_F_ALL_TSO;
5930         }
5931
5932         if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5933                                         !(features & NETIF_F_IP_CSUM)) {
5934                 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5935                 features &= ~NETIF_F_TSO;
5936                 features &= ~NETIF_F_TSO_ECN;
5937         }
5938
5939         if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5940                                          !(features & NETIF_F_IPV6_CSUM)) {
5941                 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5942                 features &= ~NETIF_F_TSO6;
5943         }
5944
5945         /* TSO ECN requires that TSO is present as well. */
5946         if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5947                 features &= ~NETIF_F_TSO_ECN;
5948
5949         /* Software GSO depends on SG. */
5950         if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5951                 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5952                 features &= ~NETIF_F_GSO;
5953         }
5954
5955         /* UFO needs SG and checksumming */
5956         if (features & NETIF_F_UFO) {
5957                 /* maybe split UFO into V4 and V6? */
5958                 if (!((features & NETIF_F_GEN_CSUM) ||
5959                     (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5960                             == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5961                         netdev_dbg(dev,
5962                                 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5963                         features &= ~NETIF_F_UFO;
5964                 }
5965
5966                 if (!(features & NETIF_F_SG)) {
5967                         netdev_dbg(dev,
5968                                 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5969                         features &= ~NETIF_F_UFO;
5970                 }
5971         }
5972
5973 #ifdef CONFIG_NET_RX_BUSY_POLL
5974         if (dev->netdev_ops->ndo_busy_poll)
5975                 features |= NETIF_F_BUSY_POLL;
5976         else
5977 #endif
5978                 features &= ~NETIF_F_BUSY_POLL;
5979
5980         return features;
5981 }
5982
5983 int __netdev_update_features(struct net_device *dev)
5984 {
5985         netdev_features_t features;
5986         int err = 0;
5987
5988         ASSERT_RTNL();
5989
5990         features = netdev_get_wanted_features(dev);
5991
5992         if (dev->netdev_ops->ndo_fix_features)
5993                 features = dev->netdev_ops->ndo_fix_features(dev, features);
5994
5995         /* driver might be less strict about feature dependencies */
5996         features = netdev_fix_features(dev, features);
5997
5998         if (dev->features == features)
5999                 return 0;
6000
6001         netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6002                 &dev->features, &features);
6003
6004         if (dev->netdev_ops->ndo_set_features)
6005                 err = dev->netdev_ops->ndo_set_features(dev, features);
6006
6007         if (unlikely(err < 0)) {
6008                 netdev_err(dev,
6009                         "set_features() failed (%d); wanted %pNF, left %pNF\n",
6010                         err, &features, &dev->features);
6011                 return -1;
6012         }
6013
6014         if (!err)
6015                 dev->features = features;
6016
6017         return 1;
6018 }
6019
6020 /**
6021  *      netdev_update_features - recalculate device features
6022  *      @dev: the device to check
6023  *
6024  *      Recalculate dev->features set and send notifications if it
6025  *      has changed. Should be called after driver or hardware dependent
6026  *      conditions might have changed that influence the features.
6027  */
6028 void netdev_update_features(struct net_device *dev)
6029 {
6030         if (__netdev_update_features(dev))
6031                 netdev_features_change(dev);
6032 }
6033 EXPORT_SYMBOL(netdev_update_features);
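
/* Illustrative usage sketch (hypothetical helper): a driver that has just
 * learned from its hardware that RX checksumming is (un)available would
 * adjust dev->hw_features and ask the core to recompute dev->features.
 */
static void __maybe_unused example_toggle_rx_csum(struct net_device *dev,
						  bool available)
{
	rtnl_lock();
	if (available)
		dev->hw_features |= NETIF_F_RXCSUM;
	else
		dev->hw_features &= ~NETIF_F_RXCSUM;
	netdev_update_features(dev);	/* recompute and notify if changed */
	rtnl_unlock();
}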
6034
6035 /**
6036  *      netdev_change_features - recalculate device features
6037  *      @dev: the device to check
6038  *
6039  *      Recalculate dev->features set and send notifications even
6040  *      if they have not changed. Should be called instead of
6041  *      netdev_update_features() if also dev->vlan_features might
6042  *      have changed to allow the changes to be propagated to stacked
6043  *      VLAN devices.
6044  */
6045 void netdev_change_features(struct net_device *dev)
6046 {
6047         __netdev_update_features(dev);
6048         netdev_features_change(dev);
6049 }
6050 EXPORT_SYMBOL(netdev_change_features);
6051
6052 /**
6053  *      netif_stacked_transfer_operstate -      transfer operstate
6054  *      @rootdev: the root or lower level device to transfer state from
6055  *      @dev: the device to transfer operstate to
6056  *
6057  *      Transfer operational state from root to device. This is normally
6058  *      called when a stacking relationship exists between the root
6059  *      device and the device (a leaf device).
6060  */
6061 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6062                                         struct net_device *dev)
6063 {
6064         if (rootdev->operstate == IF_OPER_DORMANT)
6065                 netif_dormant_on(dev);
6066         else
6067                 netif_dormant_off(dev);
6068
6069         if (netif_carrier_ok(rootdev)) {
6070                 if (!netif_carrier_ok(dev))
6071                         netif_carrier_on(dev);
6072         } else {
6073                 if (netif_carrier_ok(dev))
6074                         netif_carrier_off(dev);
6075         }
6076 }
6077 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
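
/* Illustrative usage sketch (hypothetical notifier): a stacking driver can
 * mirror carrier/dormant state onto its upper device whenever the lower
 * device reports a NETDEV_CHANGE event.  Netdev notifiers run under RTNL,
 * which netdev_master_upper_dev_get() requires.
 */
static int __maybe_unused example_netdev_event(struct notifier_block *nb,
					       unsigned long event, void *ptr)
{
	struct net_device *lower = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper;

	if (event != NETDEV_CHANGE)
		return NOTIFY_DONE;

	upper = netdev_master_upper_dev_get(lower);
	if (upper)
		netif_stacked_transfer_operstate(lower, upper);

	return NOTIFY_DONE;
}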
6078
6079 #ifdef CONFIG_SYSFS
6080 static int netif_alloc_rx_queues(struct net_device *dev)
6081 {
6082         unsigned int i, count = dev->num_rx_queues;
6083         struct netdev_rx_queue *rx;
6084
6085         BUG_ON(count < 1);
6086
6087         rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
6088         if (!rx)
6089                 return -ENOMEM;
6090
6091         dev->_rx = rx;
6092
6093         for (i = 0; i < count; i++)
6094                 rx[i].dev = dev;
6095         return 0;
6096 }
6097 #endif
6098
6099 static void netdev_init_one_queue(struct net_device *dev,
6100                                   struct netdev_queue *queue, void *_unused)
6101 {
6102         /* Initialize queue lock */
6103         spin_lock_init(&queue->_xmit_lock);
6104         netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6105         queue->xmit_lock_owner = -1;
6106         netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6107         queue->dev = dev;
6108 #ifdef CONFIG_BQL
6109         dql_init(&queue->dql, HZ);
6110 #endif
6111 }
6112
6113 static void netif_free_tx_queues(struct net_device *dev)
6114 {
6115         kvfree(dev->_tx);
6116 }
6117
6118 static int netif_alloc_netdev_queues(struct net_device *dev)
6119 {
6120         unsigned int count = dev->num_tx_queues;
6121         struct netdev_queue *tx;
6122         size_t sz = count * sizeof(*tx);
6123
6124         BUG_ON(count < 1 || count > 0xffff);
6125
6126         tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6127         if (!tx) {
6128                 tx = vzalloc(sz);
6129                 if (!tx)
6130                         return -ENOMEM;
6131         }
6132         dev->_tx = tx;
6133
6134         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6135         spin_lock_init(&dev->tx_global_lock);
6136
6137         return 0;
6138 }
6139
6140 /**
6141  *      register_netdevice      - register a network device
6142  *      @dev: device to register
6143  *
6144  *      Take a completed network device structure and add it to the kernel
6145  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6146  *      chain. 0 is returned on success. A negative errno code is returned
6147  *      on a failure to set up the device, or if the name is a duplicate.
6148  *
6149  *      Callers must hold the rtnl semaphore. You may want
6150  *      register_netdev() instead of this.
6151  *
6152  *      BUGS:
6153  *      The locking appears insufficient to guarantee two parallel registers
6154  *      will not get the same name.
6155  */
6156
6157 int register_netdevice(struct net_device *dev)
6158 {
6159         int ret;
6160         struct net *net = dev_net(dev);
6161
6162         BUG_ON(dev_boot_phase);
6163         ASSERT_RTNL();
6164
6165         might_sleep();
6166
6167         /* When net_devices are persistent, this will be fatal. */
6168         BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6169         BUG_ON(!net);
6170
6171         spin_lock_init(&dev->addr_list_lock);
6172         netdev_set_addr_lockdep_class(dev);
6173
6174         dev->iflink = -1;
6175
6176         ret = dev_get_valid_name(net, dev, dev->name);
6177         if (ret < 0)
6178                 goto out;
6179
6180         /* Init, if this function is available */
6181         if (dev->netdev_ops->ndo_init) {
6182                 ret = dev->netdev_ops->ndo_init(dev);
6183                 if (ret) {
6184                         if (ret > 0)
6185                                 ret = -EIO;
6186                         goto out;
6187                 }
6188         }
6189
6190         if (((dev->hw_features | dev->features) &
6191              NETIF_F_HW_VLAN_CTAG_FILTER) &&
6192             (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6193              !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6194                 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6195                 ret = -EINVAL;
6196                 goto err_uninit;
6197         }
6198
6199         ret = -EBUSY;
6200         if (!dev->ifindex)
6201                 dev->ifindex = dev_new_index(net);
6202         else if (__dev_get_by_index(net, dev->ifindex))
6203                 goto err_uninit;
6204
6205         if (dev->iflink == -1)
6206                 dev->iflink = dev->ifindex;
6207
6208         /* Transfer changeable features to wanted_features and enable
6209          * software offloads (GSO and GRO).
6210          */
6211         dev->hw_features |= NETIF_F_SOFT_FEATURES;
6212         dev->features |= NETIF_F_SOFT_FEATURES;
6213         dev->wanted_features = dev->features & dev->hw_features;
6214
6215         if (!(dev->flags & IFF_LOOPBACK)) {
6216                 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6217         }
6218
6219         /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6220          */
6221         dev->vlan_features |= NETIF_F_HIGHDMA;
6222
6223         /* Make NETIF_F_SG inheritable to tunnel devices.
6224          */
6225         dev->hw_enc_features |= NETIF_F_SG;
6226
6227         /* Make NETIF_F_SG inheritable to MPLS.
6228          */
6229         dev->mpls_features |= NETIF_F_SG;
6230
6231         ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6232         ret = notifier_to_errno(ret);
6233         if (ret)
6234                 goto err_uninit;
6235
6236         ret = netdev_register_kobject(dev);
6237         if (ret)
6238                 goto err_uninit;
6239         dev->reg_state = NETREG_REGISTERED;
6240
6241         __netdev_update_features(dev);
6242
6243         /*
6244          *      Default initial state at registration is that the
6245          *      device is present.
6246          */
6247
6248         set_bit(__LINK_STATE_PRESENT, &dev->state);
6249
6250         linkwatch_init_dev(dev);
6251
6252         dev_init_scheduler(dev);
6253         dev_hold(dev);
6254         list_netdevice(dev);
6255         add_device_randomness(dev->dev_addr, dev->addr_len);
6256
6257         /* If the device has a permanent device address, the driver should
6258          * set dev_addr and also addr_assign_type should be set to
6259          * NET_ADDR_PERM (the default value).
6260          */
6261         if (dev->addr_assign_type == NET_ADDR_PERM)
6262                 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6263
6264         /* Notify protocols that a new device appeared. */
6265         ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
6266         ret = notifier_to_errno(ret);
6267         if (ret) {
6268                 rollback_registered(dev);
6269                 dev->reg_state = NETREG_UNREGISTERED;
6270         }
6271         /*
6272          *      Prevent userspace races by waiting until the network
6273          *      device is fully setup before sending notifications.
6274          */
6275         if (!dev->rtnl_link_ops ||
6276             dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6277                 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6278
6279 out:
6280         return ret;
6281
6282 err_uninit:
6283         if (dev->netdev_ops->ndo_uninit)
6284                 dev->netdev_ops->ndo_uninit(dev);
6285         goto out;
6286 }
6287 EXPORT_SYMBOL(register_netdevice);
6288
6289 /**
6290  *      init_dummy_netdev       - init a dummy network device for NAPI
6291  *      @dev: device to init
6292  *
6293  *      This takes a network device structure and initializes the minimum
6294  *      number of fields so it can be used to schedule NAPI polls without
6295  *      registering a full-blown interface. This is to be used by drivers
6296  *      that need to tie several hardware interfaces to a single NAPI
6297  *      poll scheduler due to HW limitations.
6298  */
6299 int init_dummy_netdev(struct net_device *dev)
6300 {
6301         /* Clear everything. Note we don't initialize spinlocks
6302          * as they aren't supposed to be taken by any of the
6303          * NAPI code and this dummy netdev is supposed to be
6304          * only ever used for NAPI polls
6305          */
6306         memset(dev, 0, sizeof(struct net_device));
6307
6308         /* make sure we BUG if trying to hit standard
6309          * register/unregister code path
6310          */
6311         dev->reg_state = NETREG_DUMMY;
6312
6313         /* NAPI wants this */
6314         INIT_LIST_HEAD(&dev->napi_list);
6315
6316         /* a dummy interface is started by default */
6317         set_bit(__LINK_STATE_PRESENT, &dev->state);
6318         set_bit(__LINK_STATE_START, &dev->state);
6319
6320         /* Note: We don't allocate pcpu_refcnt for dummy devices,
6321          * because users of this 'device' don't need to change
6322          * its refcount.
6323          */
6324
6325         return 0;
6326 }
6327 EXPORT_SYMBOL_GPL(init_dummy_netdev);
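
/*
 * Illustrative sketch (not built): the pattern init_dummy_netdev() is
 * meant for: one never-registered netdev anchoring NAPI for several
 * hardware ports that share an interrupt.  struct example_adapter and
 * its poll callback are hypothetical.
 */
#if 0
struct example_adapter {
        struct net_device napi_dev;     /* dummy, never registered */
        struct napi_struct napi;
};

static void example_adapter_napi_init(struct example_adapter *ad,
                                      int (*poll)(struct napi_struct *, int))
{
        init_dummy_netdev(&ad->napi_dev);
        netif_napi_add(&ad->napi_dev, &ad->napi, poll, NAPI_POLL_WEIGHT);
        napi_enable(&ad->napi);
}
#endif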
6328
6329
6330 /**
6331  *      register_netdev - register a network device
6332  *      @dev: device to register
6333  *
6334  *      Take a completed network device structure and add it to the kernel
6335  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6336  *      chain. 0 is returned on success. A negative errno code is returned
6337  *      on a failure to set up the device, or if the name is a duplicate.
6338  *
6339  *      This is a wrapper around register_netdevice that takes the rtnl semaphore
6340  *      and expands the device name if you passed a format string to
6341  *      alloc_netdev.
6342  */
6343 int register_netdev(struct net_device *dev)
6344 {
6345         int err;
6346
6347         rtnl_lock();
6348         err = register_netdevice(dev);
6349         rtnl_unlock();
6350         return err;
6351 }
6352 EXPORT_SYMBOL(register_netdev);
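
/*
 * Illustrative sketch (not built): the usual driver probe flow around
 * register_netdev(): allocate, fill in the ops, register, and free on
 * failure.  struct example_port_priv and example_netdev_ops are
 * hypothetical driver-side objects.
 */
#if 0
struct example_port_priv {
        int dummy;
};

static const struct net_device_ops example_netdev_ops;

static int example_probe(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof(struct example_port_priv));
        if (!dev)
                return -ENOMEM;

        dev->netdev_ops = &example_netdev_ops;

        err = register_netdev(dev);
        if (err)
                free_netdev(dev);
        return err;
}
#endif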
6353
6354 int netdev_refcnt_read(const struct net_device *dev)
6355 {
6356         int i, refcnt = 0;
6357
6358         for_each_possible_cpu(i)
6359                 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6360         return refcnt;
6361 }
6362 EXPORT_SYMBOL(netdev_refcnt_read);
6363
6364 /**
6365  * netdev_wait_allrefs - wait until all references are gone.
6366  * @dev: target net_device
6367  *
6368  * This is called when unregistering network devices.
6369  *
6370  * Any protocol or device that holds a reference should register
6371  * for netdevice notification, and clean up and put back the
6372  * reference if they receive an UNREGISTER event.
6373  * We can get stuck here if buggy protocols don't correctly
6374  * call dev_put.
6375  */
6376 static void netdev_wait_allrefs(struct net_device *dev)
6377 {
6378         unsigned long rebroadcast_time, warning_time;
6379         int refcnt;
6380
6381         linkwatch_forget_dev(dev);
6382
6383         rebroadcast_time = warning_time = jiffies;
6384         refcnt = netdev_refcnt_read(dev);
6385
6386         while (refcnt != 0) {
6387                 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6388                         rtnl_lock();
6389
6390                         /* Rebroadcast unregister notification */
6391                         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6392
6393                         __rtnl_unlock();
6394                         rcu_barrier();
6395                         rtnl_lock();
6396
6397                         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6398                         if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6399                                      &dev->state)) {
6400                                 /* We must not have linkwatch events
6401                                  * pending on unregister. If this
6402                                  * happens, we simply run the queue
6403                                  * unscheduled, resulting in a noop
6404                                  * for this device.
6405                                  */
6406                                 linkwatch_run_queue();
6407                         }
6408
6409                         __rtnl_unlock();
6410
6411                         rebroadcast_time = jiffies;
6412                 }
6413
6414                 msleep(250);
6415
6416                 refcnt = netdev_refcnt_read(dev);
6417
6418                 if (time_after(jiffies, warning_time + 10 * HZ)) {
6419                         pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6420                                  dev->name, refcnt);
6421                         warning_time = jiffies;
6422                 }
6423         }
6424 }
6425
6426 /* The sequence is:
6427  *
6428  *      rtnl_lock();
6429  *      ...
6430  *      register_netdevice(x1);
6431  *      register_netdevice(x2);
6432  *      ...
6433  *      unregister_netdevice(y1);
6434  *      unregister_netdevice(y2);
6435  *      ...
6436  *      rtnl_unlock();
6437  *      free_netdev(y1);
6438  *      free_netdev(y2);
6439  *
6440  * We are invoked by rtnl_unlock().
6441  * This allows us to deal with problems:
6442  * 1) We can delete sysfs objects which invoke hotplug
6443  *    without deadlocking with linkwatch via keventd.
6444  * 2) Since we run with the RTNL semaphore not held, we can sleep
6445  *    safely in order to wait for the netdev refcnt to drop to zero.
6446  *
6447  * We must not return until all unregister events added during
6448  * the interval the lock was held have been completed.
6449  */
6450 void netdev_run_todo(void)
6451 {
6452         struct list_head list;
6453
6454         /* Snapshot list, allow later requests */
6455         list_replace_init(&net_todo_list, &list);
6456
6457         __rtnl_unlock();
6458
6459
6460         /* Wait for rcu callbacks to finish before next phase */
6461         if (!list_empty(&list))
6462                 rcu_barrier();
6463
6464         while (!list_empty(&list)) {
6465                 struct net_device *dev
6466                         = list_first_entry(&list, struct net_device, todo_list);
6467                 list_del(&dev->todo_list);
6468
6469                 rtnl_lock();
6470                 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6471                 __rtnl_unlock();
6472
6473                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
6474                         pr_err("network todo '%s' but state %d\n",
6475                                dev->name, dev->reg_state);
6476                         dump_stack();
6477                         continue;
6478                 }
6479
6480                 dev->reg_state = NETREG_UNREGISTERED;
6481
6482                 on_each_cpu(flush_backlog, dev, 1);
6483
6484                 netdev_wait_allrefs(dev);
6485
6486                 /* paranoia */
6487                 BUG_ON(netdev_refcnt_read(dev));
6488                 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6489                 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6490                 WARN_ON(dev->dn_ptr);
6491
6492                 if (dev->destructor)
6493                         dev->destructor(dev);
6494
6495                 /* Report a network device has been unregistered */
6496                 rtnl_lock();
6497                 dev_net(dev)->dev_unreg_count--;
6498                 __rtnl_unlock();
6499                 wake_up(&netdev_unregistering_wq);
6500
6501                 /* Free network device */
6502                 kobject_put(&dev->dev.kobj);
6503         }
6504 }
6505
6506 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
6507  * fields in the same order, with only the type differing.
6508  */
6509 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6510                              const struct net_device_stats *netdev_stats)
6511 {
6512 #if BITS_PER_LONG == 64
6513         BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6514         memcpy(stats64, netdev_stats, sizeof(*stats64));
6515 #else
6516         size_t i, n = sizeof(*stats64) / sizeof(u64);
6517         const unsigned long *src = (const unsigned long *)netdev_stats;
6518         u64 *dst = (u64 *)stats64;
6519
6520         BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6521                      sizeof(*stats64) / sizeof(u64));
6522         for (i = 0; i < n; i++)
6523                 dst[i] = src[i];
6524 #endif
6525 }
6526 EXPORT_SYMBOL(netdev_stats_to_stats64);
6527
6528 /**
6529  *      dev_get_stats   - get network device statistics
6530  *      @dev: device to get statistics from
6531  *      @storage: place to store stats
6532  *
6533  *      Get network statistics from device. Return @storage.
6534  *      The device driver may provide its own method by setting
6535  *      dev->netdev_ops->ndo_get_stats64 or dev->netdev_ops->ndo_get_stats;
6536  *      otherwise the internal statistics structure is used.
6537  */
6538 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6539                                         struct rtnl_link_stats64 *storage)
6540 {
6541         const struct net_device_ops *ops = dev->netdev_ops;
6542
6543         if (ops->ndo_get_stats64) {
6544                 memset(storage, 0, sizeof(*storage));
6545                 ops->ndo_get_stats64(dev, storage);
6546         } else if (ops->ndo_get_stats) {
6547                 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6548         } else {
6549                 netdev_stats_to_stats64(storage, &dev->stats);
6550         }
6551         storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6552         storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
6553         return storage;
6554 }
6555 EXPORT_SYMBOL(dev_get_stats);
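
/*
 * Illustrative sketch (not built): a consumer of dev_get_stats().  The
 * caller supplies the storage and the returned pointer is simply that
 * storage again.  example_dump_stats() is a hypothetical helper.
 */
#if 0
static void example_dump_stats(struct net_device *dev)
{
        struct rtnl_link_stats64 stats;

        dev_get_stats(dev, &stats);
        netdev_info(dev, "rx %llu tx %llu dropped %llu\n",
                    stats.rx_packets, stats.tx_packets,
                    stats.rx_dropped + stats.tx_dropped);
}
#endif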
6556
6557 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6558 {
6559         struct netdev_queue *queue = dev_ingress_queue(dev);
6560
6561 #ifdef CONFIG_NET_CLS_ACT
6562         if (queue)
6563                 return queue;
6564         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6565         if (!queue)
6566                 return NULL;
6567         netdev_init_one_queue(dev, queue, NULL);
6568         queue->qdisc = &noop_qdisc;
6569         queue->qdisc_sleeping = &noop_qdisc;
6570         rcu_assign_pointer(dev->ingress_queue, queue);
6571 #endif
6572         return queue;
6573 }
6574
6575 static const struct ethtool_ops default_ethtool_ops;
6576
6577 void netdev_set_default_ethtool_ops(struct net_device *dev,
6578                                     const struct ethtool_ops *ops)
6579 {
6580         if (dev->ethtool_ops == &default_ethtool_ops)
6581                 dev->ethtool_ops = ops;
6582 }
6583 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6584
6585 void netdev_freemem(struct net_device *dev)
6586 {
6587         char *addr = (char *)dev - dev->padded;
6588
6589         kvfree(addr);
6590 }
6591
6592 /**
6593  *      alloc_netdev_mqs - allocate network device
6594  *      @sizeof_priv:           size of private data to allocate space for
6595  *      @name:                  device name format string
6596  *      @name_assign_type:      origin of device name
6597  *      @setup:                 callback to initialize device
6598  *      @txqs:                  the number of TX subqueues to allocate
6599  *      @rxqs:                  the number of RX subqueues to allocate
6600  *
6601  *      Allocates a struct net_device with private data area for driver use
6602  *      and performs basic initialization.  Also allocates subqueue structs
6603  *      for each queue on the device.
6604  */
6605 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6606                 unsigned char name_assign_type,
6607                 void (*setup)(struct net_device *),
6608                 unsigned int txqs, unsigned int rxqs)
6609 {
6610         struct net_device *dev;
6611         size_t alloc_size;
6612         struct net_device *p;
6613
6614         BUG_ON(strlen(name) >= sizeof(dev->name));
6615
6616         if (txqs < 1) {
6617                 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6618                 return NULL;
6619         }
6620
6621 #ifdef CONFIG_SYSFS
6622         if (rxqs < 1) {
6623                 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6624                 return NULL;
6625         }
6626 #endif
6627
6628         alloc_size = sizeof(struct net_device);
6629         if (sizeof_priv) {
6630                 /* ensure 32-byte alignment of private area */
6631                 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6632                 alloc_size += sizeof_priv;
6633         }
6634         /* ensure 32-byte alignment of whole construct */
6635         alloc_size += NETDEV_ALIGN - 1;
6636
6637         p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6638         if (!p)
6639                 p = vzalloc(alloc_size);
6640         if (!p)
6641                 return NULL;
6642
6643         dev = PTR_ALIGN(p, NETDEV_ALIGN);
6644         dev->padded = (char *)dev - (char *)p;
6645
6646         dev->pcpu_refcnt = alloc_percpu(int);
6647         if (!dev->pcpu_refcnt)
6648                 goto free_dev;
6649
6650         if (dev_addr_init(dev))
6651                 goto free_pcpu;
6652
6653         dev_mc_init(dev);
6654         dev_uc_init(dev);
6655
6656         dev_net_set(dev, &init_net);
6657
6658         dev->gso_max_size = GSO_MAX_SIZE;
6659         dev->gso_max_segs = GSO_MAX_SEGS;
6660         dev->gso_min_segs = 0;
6661
6662         INIT_LIST_HEAD(&dev->napi_list);
6663         INIT_LIST_HEAD(&dev->unreg_list);
6664         INIT_LIST_HEAD(&dev->close_list);
6665         INIT_LIST_HEAD(&dev->link_watch_list);
6666         INIT_LIST_HEAD(&dev->adj_list.upper);
6667         INIT_LIST_HEAD(&dev->adj_list.lower);
6668         INIT_LIST_HEAD(&dev->all_adj_list.upper);
6669         INIT_LIST_HEAD(&dev->all_adj_list.lower);
6670         dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
6671         setup(dev);
6672
6673         dev->num_tx_queues = txqs;
6674         dev->real_num_tx_queues = txqs;
6675         if (netif_alloc_netdev_queues(dev))
6676                 goto free_all;
6677
6678 #ifdef CONFIG_SYSFS
6679         dev->num_rx_queues = rxqs;
6680         dev->real_num_rx_queues = rxqs;
6681         if (netif_alloc_rx_queues(dev))
6682                 goto free_all;
6683 #endif
6684
6685         strcpy(dev->name, name);
6686         dev->name_assign_type = name_assign_type;
6687         dev->group = INIT_NETDEV_GROUP;
6688         if (!dev->ethtool_ops)
6689                 dev->ethtool_ops = &default_ethtool_ops;
6690         return dev;
6691
6692 free_all:
6693         free_netdev(dev);
6694         return NULL;
6695
6696 free_pcpu:
6697         free_percpu(dev->pcpu_refcnt);
6698 free_dev:
6699         netdev_freemem(dev);
6700         return NULL;
6701 }
6702 EXPORT_SYMBOL(alloc_netdev_mqs);
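
/*
 * Illustrative sketch (not built): a multiqueue Ethernet-style
 * allocation with ether_setup() as the setup callback, four TX and four
 * RX queues, and an "ex%d" name template that register_netdev() will
 * expand later.  A private data size of 0 is used only for brevity; a
 * real driver passes the size of its private struct.
 */
#if 0
static struct net_device *example_alloc(void)
{
        return alloc_netdev_mqs(0, "ex%d", NET_NAME_UNKNOWN,
                                ether_setup, 4, 4);
}
#endif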
6703
6704 /**
6705  *      free_netdev - free network device
6706  *      @dev: device
6707  *
6708  *      This function does the last stage of destroying an allocated device
6709  *      interface. The reference to the device object is released.
6710  *      If this is the last reference then it will be freed.
6711  */
6712 void free_netdev(struct net_device *dev)
6713 {
6714         struct napi_struct *p, *n;
6715
6716         release_net(dev_net(dev));
6717
6718         netif_free_tx_queues(dev);
6719 #ifdef CONFIG_SYSFS
6720         kfree(dev->_rx);
6721 #endif
6722
6723         kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6724
6725         /* Flush device addresses */
6726         dev_addr_flush(dev);
6727
6728         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6729                 netif_napi_del(p);
6730
6731         free_percpu(dev->pcpu_refcnt);
6732         dev->pcpu_refcnt = NULL;
6733
6734         /*  Compatibility with error handling in drivers */
6735         if (dev->reg_state == NETREG_UNINITIALIZED) {
6736                 netdev_freemem(dev);
6737                 return;
6738         }
6739
6740         BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6741         dev->reg_state = NETREG_RELEASED;
6742
6743         /* will free via device release */
6744         put_device(&dev->dev);
6745 }
6746 EXPORT_SYMBOL(free_netdev);
6747
6748 /**
6749  *      synchronize_net -  Synchronize with packet receive processing
6750  *
6751  *      Wait for packets currently being received to be done.
6752  *      Does not block later packets from starting.
6753  */
6754 void synchronize_net(void)
6755 {
6756         might_sleep();
6757         if (rtnl_is_locked())
6758                 synchronize_rcu_expedited();
6759         else
6760                 synchronize_rcu();
6761 }
6762 EXPORT_SYMBOL(synchronize_net);
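
/*
 * Illustrative sketch (not built): the teardown pattern synchronize_net()
 * enables: unpublish a packet handler, wait for receivers that may still
 * be running, then free what they might have been touching.
 * example_ptype and example_state are hypothetical.
 */
#if 0
static void example_teardown(struct packet_type *example_ptype,
                             void *example_state)
{
        dev_remove_pack(example_ptype);
        synchronize_net();      /* in-flight receivers are now done */
        kfree(example_state);
}
#endif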
6763
6764 /**
6765  *      unregister_netdevice_queue - remove device from the kernel
6766  *      @dev: device
6767  *      @head: list
6768  *
6769  *      This function shuts down a device interface and removes it
6770  *      from the kernel tables.
6771  *      If head is not NULL, the device is queued to be unregistered later.
6772  *
6773  *      Callers must hold the rtnl semaphore.  You may want
6774  *      unregister_netdev() instead of this.
6775  */
6776
6777 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6778 {
6779         ASSERT_RTNL();
6780
6781         if (head) {
6782                 list_move_tail(&dev->unreg_list, head);
6783         } else {
6784                 rollback_registered(dev);
6785                 /* Finish processing unregister after unlock */
6786                 net_set_todo(dev);
6787         }
6788 }
6789 EXPORT_SYMBOL(unregister_netdevice_queue);
6790
6791 /**
6792  *      unregister_netdevice_many - unregister many devices
6793  *      @head: list of devices
6794  *
6795  *  Note: As most callers use a stack-allocated list_head,
6796  *  we force a list_del() to make sure the stack won't be corrupted later.
6797  */
6798 void unregister_netdevice_many(struct list_head *head)
6799 {
6800         struct net_device *dev;
6801
6802         if (!list_empty(head)) {
6803                 rollback_registered_many(head);
6804                 list_for_each_entry(dev, head, unreg_list)
6805                         net_set_todo(dev);
6806                 list_del(head);
6807         }
6808 }
6809 EXPORT_SYMBOL(unregister_netdevice_many);
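
/*
 * Illustrative sketch (not built): batched teardown under RTNL, using
 * the stack-allocated list_head mentioned in the kernel-doc above.  The
 * example_devs array is hypothetical.
 */
#if 0
static void example_remove_all(struct net_device **example_devs, int count)
{
        LIST_HEAD(kill_list);
        int i;

        rtnl_lock();
        for (i = 0; i < count; i++)
                unregister_netdevice_queue(example_devs[i], &kill_list);
        unregister_netdevice_many(&kill_list);
        rtnl_unlock();
}
#endif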
6810
6811 /**
6812  *      unregister_netdev - remove device from the kernel
6813  *      @dev: device
6814  *
6815  *      This function shuts down a device interface and removes it
6816  *      from the kernel tables.
6817  *
6818  *      This is just a wrapper for unregister_netdevice that takes
6819  *      the rtnl semaphore.  In general you want to use this and not
6820  *      unregister_netdevice.
6821  */
6822 void unregister_netdev(struct net_device *dev)
6823 {
6824         rtnl_lock();
6825         unregister_netdevice(dev);
6826         rtnl_unlock();
6827 }
6828 EXPORT_SYMBOL(unregister_netdev);
6829
6830 /**
6831  *      dev_change_net_namespace - move device to a different network namespace
6832  *      @dev: device
6833  *      @net: network namespace
6834  *      @pat: If not NULL, name pattern to try if the current device name
6835  *            is already taken in the destination network namespace.
6836  *
6837  *      This function shuts down a device interface and moves it
6838  *      to a new network namespace. On success 0 is returned, on
6839  *      a failure a negative errno code is returned.
6840  *
6841  *      Callers must hold the rtnl semaphore.
6842  */
6843
6844 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6845 {
6846         int err;
6847
6848         ASSERT_RTNL();
6849
6850         /* Don't allow namespace local devices to be moved. */
6851         err = -EINVAL;
6852         if (dev->features & NETIF_F_NETNS_LOCAL)
6853                 goto out;
6854
6855         /* Ensure the device has been registered */
6856         if (dev->reg_state != NETREG_REGISTERED)
6857                 goto out;
6858
6859         /* Get out if there is nothing to do */
6860         err = 0;
6861         if (net_eq(dev_net(dev), net))
6862                 goto out;
6863
6864         /* Pick the destination device name, and ensure
6865          * we can use it in the destination network namespace.
6866          */
6867         err = -EEXIST;
6868         if (__dev_get_by_name(net, dev->name)) {
6869                 /* We get here if we can't use the current device name */
6870                 if (!pat)
6871                         goto out;
6872                 if (dev_get_valid_name(net, dev, pat) < 0)
6873                         goto out;
6874         }
6875
6876         /*
6877          * And now a mini version of register_netdevice and unregister_netdevice.
6878          */
6879
6880         /* If the device is running, close it first. */
6881         dev_close(dev);
6882
6883         /* And unlink it from the device chain */
6884         err = -ENODEV;
6885         unlist_netdevice(dev);
6886
6887         synchronize_net();
6888
6889         /* Shutdown queueing discipline. */
6890         dev_shutdown(dev);
6891
6892         /* Notify protocols that we are about to destroy
6893            this device. They should clean up all of their state.
6894
6895            Note that dev->reg_state stays at NETREG_REGISTERED.
6896            This is intentional: that way 8021q and macvlan know
6897            the device is just moving and can keep their slaves up.
6898         */
6899         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6900         rcu_barrier();
6901         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6902         rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
6903
6904         /*
6905          *      Flush the unicast and multicast chains
6906          */
6907         dev_uc_flush(dev);
6908         dev_mc_flush(dev);
6909
6910         /* Send a netdev-removed uevent to the old namespace */
6911         kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6912         netdev_adjacent_del_links(dev);
6913
6914         /* Actually switch the network namespace */
6915         dev_net_set(dev, net);
6916
6917         /* If there is an ifindex conflict, assign a new one */
6918         if (__dev_get_by_index(net, dev->ifindex)) {
6919                 int iflink = (dev->iflink == dev->ifindex);
6920                 dev->ifindex = dev_new_index(net);
6921                 if (iflink)
6922                         dev->iflink = dev->ifindex;
6923         }
6924
6925         /* Send a netdev-add uevent to the new namespace */
6926         kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6927         netdev_adjacent_add_links(dev);
6928
6929         /* Fixup kobjects */
6930         err = device_rename(&dev->dev, dev->name);
6931         WARN_ON(err);
6932
6933         /* Add the device back in the hashes */
6934         list_netdevice(dev);
6935
6936         /* Notify protocols that a new device appeared. */
6937         call_netdevice_notifiers(NETDEV_REGISTER, dev);
6938
6939         /*
6940          *      Prevent userspace races by waiting until the network
6941          *      device is fully setup before sending notifications.
6942          */
6943         rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6944
6945         synchronize_net();
6946         err = 0;
6947 out:
6948         return err;
6949 }
6950 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
6951
6952 static int dev_cpu_callback(struct notifier_block *nfb,
6953                             unsigned long action,
6954                             void *ocpu)
6955 {
6956         struct sk_buff **list_skb;
6957         struct sk_buff *skb;
6958         unsigned int cpu, oldcpu = (unsigned long)ocpu;
6959         struct softnet_data *sd, *oldsd;
6960
6961         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6962                 return NOTIFY_OK;
6963
6964         local_irq_disable();
6965         cpu = smp_processor_id();
6966         sd = &per_cpu(softnet_data, cpu);
6967         oldsd = &per_cpu(softnet_data, oldcpu);
6968
6969         /* Find end of our completion_queue. */
6970         list_skb = &sd->completion_queue;
6971         while (*list_skb)
6972                 list_skb = &(*list_skb)->next;
6973         /* Append completion queue from offline CPU. */
6974         *list_skb = oldsd->completion_queue;
6975         oldsd->completion_queue = NULL;
6976
6977         /* Append output queue from offline CPU. */
6978         if (oldsd->output_queue) {
6979                 *sd->output_queue_tailp = oldsd->output_queue;
6980                 sd->output_queue_tailp = oldsd->output_queue_tailp;
6981                 oldsd->output_queue = NULL;
6982                 oldsd->output_queue_tailp = &oldsd->output_queue;
6983         }
6984         /* Append NAPI poll list from offline CPU. */
6985         if (!list_empty(&oldsd->poll_list)) {
6986                 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6987                 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6988         }
6989
6990         raise_softirq_irqoff(NET_TX_SOFTIRQ);
6991         local_irq_enable();
6992
6993         /* Process offline CPU's input_pkt_queue */
6994         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6995                 netif_rx_internal(skb);
6996                 input_queue_head_incr(oldsd);
6997         }
6998         while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6999                 netif_rx_internal(skb);
7000                 input_queue_head_incr(oldsd);
7001         }
7002
7003         return NOTIFY_OK;
7004 }
7005
7006
7007 /**
7008  *      netdev_increment_features - increment feature set by one
7009  *      @all: current feature set
7010  *      @one: new feature set
7011  *      @mask: mask feature set
7012  *
7013  *      Computes a new feature set after adding a device with feature set
7014  *      @one to the master device with current feature set @all.  Will not
7015  *      enable anything that is off in @mask. Returns the new feature set.
7016  */
7017 netdev_features_t netdev_increment_features(netdev_features_t all,
7018         netdev_features_t one, netdev_features_t mask)
7019 {
7020         if (mask & NETIF_F_GEN_CSUM)
7021                 mask |= NETIF_F_ALL_CSUM;
7022         mask |= NETIF_F_VLAN_CHALLENGED;
7023
7024         all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7025         all &= one | ~NETIF_F_ALL_FOR_ALL;
7026
7027         /* If one device supports hw checksumming, set for all. */
7028         if (all & NETIF_F_GEN_CSUM)
7029                 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7030
7031         return all;
7032 }
7033 EXPORT_SYMBOL(netdev_increment_features);
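
/*
 * Illustrative sketch (not built): a bonding/team-style master folding
 * its slaves' feature sets together, simplified to a plain array of
 * slaves (real drivers walk their own slave lists under RTNL).  Starting
 * from the mask and folding each slave in roughly mirrors how bonding
 * uses this helper.
 */
#if 0
static netdev_features_t example_master_features(struct net_device *master,
                                                 struct net_device **slaves,
                                                 int n_slaves)
{
        netdev_features_t mask = master->features;
        netdev_features_t features = mask;
        int i;

        for (i = 0; i < n_slaves; i++)
                features = netdev_increment_features(features,
                                                     slaves[i]->features,
                                                     mask);
        return features;
}
#endif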
7034
7035 static struct hlist_head * __net_init netdev_create_hash(void)
7036 {
7037         int i;
7038         struct hlist_head *hash;
7039
7040         hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7041         if (hash != NULL)
7042                 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7043                         INIT_HLIST_HEAD(&hash[i]);
7044
7045         return hash;
7046 }
7047
7048 /* Initialize per network namespace state */
7049 static int __net_init netdev_init(struct net *net)
7050 {
7051         if (net != &init_net)
7052                 INIT_LIST_HEAD(&net->dev_base_head);
7053
7054         net->dev_name_head = netdev_create_hash();
7055         if (net->dev_name_head == NULL)
7056                 goto err_name;
7057
7058         net->dev_index_head = netdev_create_hash();
7059         if (net->dev_index_head == NULL)
7060                 goto err_idx;
7061
7062         return 0;
7063
7064 err_idx:
7065         kfree(net->dev_name_head);
7066 err_name:
7067         return -ENOMEM;
7068 }
7069
7070 /**
7071  *      netdev_drivername - network driver for the device
7072  *      @dev: network device
7073  *
7074  *      Determine network driver for device.
7075  */
7076 const char *netdev_drivername(const struct net_device *dev)
7077 {
7078         const struct device_driver *driver;
7079         const struct device *parent;
7080         const char *empty = "";
7081
7082         parent = dev->dev.parent;
7083         if (!parent)
7084                 return empty;
7085
7086         driver = parent->driver;
7087         if (driver && driver->name)
7088                 return driver->name;
7089         return empty;
7090 }
7091
7092 static void __netdev_printk(const char *level, const struct net_device *dev,
7093                             struct va_format *vaf)
7094 {
7095         if (dev && dev->dev.parent) {
7096                 dev_printk_emit(level[1] - '0',
7097                                 dev->dev.parent,
7098                                 "%s %s %s%s: %pV",
7099                                 dev_driver_string(dev->dev.parent),
7100                                 dev_name(dev->dev.parent),
7101                                 netdev_name(dev), netdev_reg_state(dev),
7102                                 vaf);
7103         } else if (dev) {
7104                 printk("%s%s%s: %pV",
7105                        level, netdev_name(dev), netdev_reg_state(dev), vaf);
7106         } else {
7107                 printk("%s(NULL net_device): %pV", level, vaf);
7108         }
7109 }
7110
7111 void netdev_printk(const char *level, const struct net_device *dev,
7112                    const char *format, ...)
7113 {
7114         struct va_format vaf;
7115         va_list args;
7116
7117         va_start(args, format);
7118
7119         vaf.fmt = format;
7120         vaf.va = &args;
7121
7122         __netdev_printk(level, dev, &vaf);
7123
7124         va_end(args);
7125 }
7126 EXPORT_SYMBOL(netdev_printk);
7127
7128 #define define_netdev_printk_level(func, level)                 \
7129 void func(const struct net_device *dev, const char *fmt, ...)   \
7130 {                                                               \
7131         struct va_format vaf;                                   \
7132         va_list args;                                           \
7133                                                                 \
7134         va_start(args, fmt);                                    \
7135                                                                 \
7136         vaf.fmt = fmt;                                          \
7137         vaf.va = &args;                                         \
7138                                                                 \
7139         __netdev_printk(level, dev, &vaf);                      \
7140                                                                 \
7141         va_end(args);                                           \
7142 }                                                               \
7143 EXPORT_SYMBOL(func);
7144
7145 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7146 define_netdev_printk_level(netdev_alert, KERN_ALERT);
7147 define_netdev_printk_level(netdev_crit, KERN_CRIT);
7148 define_netdev_printk_level(netdev_err, KERN_ERR);
7149 define_netdev_printk_level(netdev_warn, KERN_WARNING);
7150 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7151 define_netdev_printk_level(netdev_info, KERN_INFO);
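
/*
 * Illustrative sketch (not built): the generated helpers are used like
 * dev_err()/dev_info(), but prefix the message with the parent driver
 * and device names plus the interface name, as __netdev_printk() above
 * shows.  example_report_link() is a hypothetical caller.
 */
#if 0
static void example_report_link(struct net_device *dev, bool up)
{
        if (up)
                netdev_info(dev, "link is up\n");
        else
                netdev_warn(dev, "link is down\n");
}
#endif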
7152
7153 static void __net_exit netdev_exit(struct net *net)
7154 {
7155         kfree(net->dev_name_head);
7156         kfree(net->dev_index_head);
7157 }
7158
7159 static struct pernet_operations __net_initdata netdev_net_ops = {
7160         .init = netdev_init,
7161         .exit = netdev_exit,
7162 };
7163
7164 static void __net_exit default_device_exit(struct net *net)
7165 {
7166         struct net_device *dev, *aux;
7167         /*
7168          * Push all migratable network devices back to the
7169          * initial network namespace
7170          */
7171         rtnl_lock();
7172         for_each_netdev_safe(net, dev, aux) {
7173                 int err;
7174                 char fb_name[IFNAMSIZ];
7175
7176                 /* Ignore unmovable devices (e.g. loopback) */
7177                 if (dev->features & NETIF_F_NETNS_LOCAL)
7178                         continue;
7179
7180                 /* Leave virtual devices for the generic cleanup */
7181                 if (dev->rtnl_link_ops)
7182                         continue;
7183
7184                 /* Push remaining network devices to init_net */
7185                 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7186                 err = dev_change_net_namespace(dev, &init_net, fb_name);
7187                 if (err) {
7188                         pr_emerg("%s: failed to move %s to init_net: %d\n",
7189                                  __func__, dev->name, err);
7190                         BUG();
7191                 }
7192         }
7193         rtnl_unlock();
7194 }
7195
7196 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7197 {
7198         /* Return with the rtnl_lock held when there are no network
7199          * devices unregistering in any network namespace in net_list.
7200          */
7201         struct net *net;
7202         bool unregistering;
7203         DEFINE_WAIT_FUNC(wait, woken_wake_function);
7204
7205         add_wait_queue(&netdev_unregistering_wq, &wait);
7206         for (;;) {
7207                 unregistering = false;
7208                 rtnl_lock();
7209                 list_for_each_entry(net, net_list, exit_list) {
7210                         if (net->dev_unreg_count > 0) {
7211                                 unregistering = true;
7212                                 break;
7213                         }
7214                 }
7215                 if (!unregistering)
7216                         break;
7217                 __rtnl_unlock();
7218
7219                 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
7220         }
7221         remove_wait_queue(&netdev_unregistering_wq, &wait);
7222 }
7223
7224 static void __net_exit default_device_exit_batch(struct list_head *net_list)
7225 {
7226         /* At exit all network devices must be removed from a network
7227          * namespace.  Do this in the reverse order of registration.
7228          * Do this across as many network namespaces as possible to
7229          * improve batching efficiency.
7230          */
7231         struct net_device *dev;
7232         struct net *net;
7233         LIST_HEAD(dev_kill_list);
7234
7235         /* To prevent network device cleanup code from dereferencing
7236          * loopback devices or network devices that have been freed,
7237          * wait here for all pending unregistrations to complete
7238          * before unregistering the loopback device and allowing the
7239          * network namespace to be freed.
7240          *
7241          * The netdev todo list containing all network device
7242          * unregistrations that happen in default_device_exit_batch
7243          * will run in the rtnl_unlock() at the end of
7244          * default_device_exit_batch.
7245          */
7246         rtnl_lock_unregistering(net_list);
7247         list_for_each_entry(net, net_list, exit_list) {
7248                 for_each_netdev_reverse(net, dev) {
7249                         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
7250                                 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7251                         else
7252                                 unregister_netdevice_queue(dev, &dev_kill_list);
7253                 }
7254         }
7255         unregister_netdevice_many(&dev_kill_list);
7256         rtnl_unlock();
7257 }
7258
7259 static struct pernet_operations __net_initdata default_device_ops = {
7260         .exit = default_device_exit,
7261         .exit_batch = default_device_exit_batch,
7262 };
7263
7264 /*
7265  *      Initialize the DEV module. At boot time this walks the device list and
7266  *      unhooks any devices that fail to initialize (normally hardware not
7267  *      present) and leaves us with a valid list of present and active devices.
7268  *
7269  */
7270
7271 /*
7272  *       This is called single-threaded during boot, so no need
7273  *       to take the rtnl semaphore.
7274  */
7275 static int __init net_dev_init(void)
7276 {
7277         int i, rc = -ENOMEM;
7278
7279         BUG_ON(!dev_boot_phase);
7280
7281         if (dev_proc_init())
7282                 goto out;
7283
7284         if (netdev_kobject_init())
7285                 goto out;
7286
7287         INIT_LIST_HEAD(&ptype_all);
7288         for (i = 0; i < PTYPE_HASH_SIZE; i++)
7289                 INIT_LIST_HEAD(&ptype_base[i]);
7290
7291         INIT_LIST_HEAD(&offload_base);
7292
7293         if (register_pernet_subsys(&netdev_net_ops))
7294                 goto out;
7295
7296         /*
7297          *      Initialise the packet receive queues.
7298          */
7299
7300         for_each_possible_cpu(i) {
7301                 struct softnet_data *sd = &per_cpu(softnet_data, i);
7302
7303                 skb_queue_head_init(&sd->input_pkt_queue);
7304                 skb_queue_head_init(&sd->process_queue);
7305                 INIT_LIST_HEAD(&sd->poll_list);
7306                 sd->output_queue_tailp = &sd->output_queue;
7307 #ifdef CONFIG_RPS
7308                 sd->csd.func = rps_trigger_softirq;
7309                 sd->csd.info = sd;
7310                 sd->cpu = i;
7311 #endif
7312
7313                 sd->backlog.poll = process_backlog;
7314                 sd->backlog.weight = weight_p;
7315         }
7316
7317         dev_boot_phase = 0;
7318
7319         /* The loopback device is special: if any other network device
7320          * is present in a network namespace, the loopback device must
7321          * be present too. Since we now dynamically allocate and free the
7322          * loopback device, ensure this invariant is maintained by
7323          * keeping the loopback device as the first device on the
7324          * list of network devices.  It is thus the first device that
7325          * appears and the last network device that disappears.
7327          */
7328         if (register_pernet_device(&loopback_net_ops))
7329                 goto out;
7330
7331         if (register_pernet_device(&default_device_ops))
7332                 goto out;
7333
7334         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7335         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
7336
7337         hotcpu_notifier(dev_cpu_callback, 0);
7338         dst_init();
7339         rc = 0;
7340 out:
7341         return rc;
7342 }
7343
7344 subsys_initcall(net_dev_init);