/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/switchdev.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";

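/*
 * A device is "alive" while its reg_state is at or before
 * NETREG_REGISTERED (the first two values of the enum), i.e. it is
 * not partway through unregistration or teardown.
 */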
static inline int dev_isalive(const struct net_device *dev)
{
        return dev->reg_state <= NETREG_REGISTERED;
}

/* use same locking rules as GIF* ioctls */
static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = (*format)(ndev, buf);
        read_unlock(&dev_base_lock);

        return ret;
}

/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)                            \
static ssize_t format_##field(const struct net_device *dev, char *buf)  \
{                                                                       \
        return sprintf(buf, format_string, dev->field);                 \
}                                                                       \
static ssize_t field##_show(struct device *dev,                         \
                            struct device_attribute *attr, char *buf)   \
{                                                                       \
        return netdev_show(dev, attr, buf, format_##field);             \
}                                                                       \

#define NETDEVICE_SHOW_RO(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)                         \
NETDEVICE_SHOW(field, format_string);                                   \
static DEVICE_ATTR_RW(field)

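/*
 * Example expansion (a sketch): NETDEVICE_SHOW_RW(mtu, fmt_dec)
 * generates format_mtu() and mtu_show() from the template above, and
 * DEVICE_ATTR_RW() pairs mtu_show() with a separately written
 * mtu_store() in a struct device_attribute named dev_attr_mtu.
 */
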
/* use same locking and permission rules as SIF* ioctls */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t len,
                            int (*set)(struct net_device *, unsigned long))
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        unsigned long new;
        int ret = -EINVAL;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        ret = kstrtoul(buf, 0, &new);
        if (ret)
                goto err;

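        /*
         * rtnl_trylock() + restart_syscall() avoids deadlocking against
         * an rtnl-holding path that is itself blocked tearing down these
         * sysfs entries: instead of sleeping on the lock, return to user
         * space and let the write(2) be restarted.
         */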
        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                ret = (*set)(netdev, new);
                if (ret == 0)
                        ret = len;
        }
        rtnl_unlock();
 err:
        return ret;
}

NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);

static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
{
        return sprintf(buf, fmt_dec, dev->name_assign_type);
}

static ssize_t name_assign_type_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (ndev->name_assign_type != NET_NAME_UNKNOWN)
                ret = netdev_show(dev, attr, buf, format_name_assign_type);

        return ret;
}
static DEVICE_ATTR_RO(name_assign_type);

/* use same locking rules as GIFHWADDR ioctls */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        read_lock(&dev_base_lock);
        if (dev_isalive(ndev))
                ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
        read_unlock(&dev_base_lock);
        return ret;
}
static DEVICE_ATTR_RO(address);

static ssize_t broadcast_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct net_device *ndev = to_net_dev(dev);

        if (dev_isalive(ndev))
                return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
        return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);

static int change_carrier(struct net_device *dev, unsigned long new_carrier)
{
        if (!netif_running(dev))
                return -EINVAL;
        return dev_change_carrier(dev, (bool) new_carrier);
}

static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_carrier);
}

static ssize_t carrier_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
        return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
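
/*
 * Example (a sketch; "eth0" is an assumed interface name):
 *   cat /sys/class/net/eth0/carrier       -> 0 or 1
 *   echo 1 > /sys/class/net/eth0/carrier  -> change_carrier(), honoured
 * only by drivers that implement ndo_change_carrier.
 */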

static ssize_t speed_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;

                if (!__ethtool_get_settings(netdev, &cmd))
                        ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(speed);

static ssize_t duplex_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        int ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (netif_running(netdev)) {
                struct ethtool_cmd cmd;

                if (!__ethtool_get_settings(netdev, &cmd)) {
                        const char *duplex;

                        switch (cmd.duplex) {
                        case DUPLEX_HALF:
                                duplex = "half";
                                break;
                        case DUPLEX_FULL:
                                duplex = "full";
                                break;
                        default:
                                duplex = "unknown";
                                break;
                        }
                        ret = sprintf(buf, "%s\n", duplex);
                }
        }
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RO(duplex);

static ssize_t dormant_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        if (netif_running(netdev))
                return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

        return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);

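/* Indexed by the IF_OPER_* values, i.e. the RFC 2863 operational states. */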
static const char *const operstates[] = {
        "unknown",
        "notpresent", /* currently unused */
        "down",
        "lowerlayerdown",
        "testing", /* currently unused */
        "dormant",
        "up"
};

static ssize_t operstate_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        unsigned char operstate;

        read_lock(&dev_base_lock);
        operstate = netdev->operstate;
        if (!netif_running(netdev))
                operstate = IF_OPER_DOWN;
        read_unlock(&dev_base_lock);

        if (operstate >= ARRAY_SIZE(operstates))
                return -EINVAL; /* should not happen */

        return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);

static ssize_t carrier_changes_show(struct device *dev,
                                    struct device_attribute *attr,
                                    char *buf)
{
        struct net_device *netdev = to_net_dev(dev);

        return sprintf(buf, fmt_dec,
                       atomic_read(&netdev->carrier_changes));
}
static DEVICE_ATTR_RO(carrier_changes);

/* read-write attributes */

static int change_mtu(struct net_device *dev, unsigned long new_mtu)
{
        return dev_set_mtu(dev, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);

static int change_flags(struct net_device *dev, unsigned long new_flags)
{
        return dev_change_flags(dev, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);

static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
{
        dev->tx_queue_len = new_len;
        return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);

static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
{
        dev->gro_flush_timeout = val;
        return 0;
}

static ssize_t gro_flush_timeout_store(struct device *dev,
                                       struct device_attribute *attr,
                                       const char *buf, size_t len)
{
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        return netdev_store(dev, attr, buf, len, change_gro_flush_timeout);
}
NETDEVICE_SHOW_RW(gro_flush_timeout, fmt_ulong);

static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t len)
{
        struct net_device *netdev = to_net_dev(dev);
        struct net *net = dev_net(netdev);
        size_t count = len;
        ssize_t ret;

        if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
                return -EPERM;

        /* ignore trailing newline */
        if (len > 0 && buf[len - 1] == '\n')
                --count;

        if (!rtnl_trylock())
                return restart_syscall();
        ret = dev_set_alias(netdev, buf, count);
        rtnl_unlock();

        return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        const struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = 0;

        if (!rtnl_trylock())
                return restart_syscall();
        if (netdev->ifalias)
                ret = sprintf(buf, "%s\n", netdev->ifalias);
        rtnl_unlock();
        return ret;
}
static DEVICE_ATTR_RW(ifalias);

static int change_group(struct net_device *dev, unsigned long new_group)
{
        dev_set_group(dev, (int) new_group);
        return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t len)
{
        return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);

static ssize_t phys_port_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct netdev_phys_item_id ppid;

                ret = dev_get_phys_port_id(netdev, &ppid);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_port_id);

static ssize_t phys_switch_id_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct net_device *netdev = to_net_dev(dev);
        ssize_t ret = -EINVAL;

        if (!rtnl_trylock())
                return restart_syscall();

        if (dev_isalive(netdev)) {
                struct netdev_phys_item_id ppid;

                ret = netdev_switch_parent_id_get(netdev, &ppid);
                if (!ret)
                        ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
        }
        rtnl_unlock();

        return ret;
}
static DEVICE_ATTR_RO(phys_switch_id);

static struct attribute *net_class_attrs[] = {
        &dev_attr_netdev_group.attr,
        &dev_attr_type.attr,
        &dev_attr_dev_id.attr,
        &dev_attr_dev_port.attr,
        &dev_attr_iflink.attr,
        &dev_attr_ifindex.attr,
        &dev_attr_name_assign_type.attr,
        &dev_attr_addr_assign_type.attr,
        &dev_attr_addr_len.attr,
        &dev_attr_link_mode.attr,
        &dev_attr_address.attr,
        &dev_attr_broadcast.attr,
        &dev_attr_speed.attr,
        &dev_attr_duplex.attr,
        &dev_attr_dormant.attr,
        &dev_attr_operstate.attr,
        &dev_attr_carrier_changes.attr,
        &dev_attr_ifalias.attr,
        &dev_attr_carrier.attr,
        &dev_attr_mtu.attr,
        &dev_attr_flags.attr,
        &dev_attr_tx_queue_len.attr,
        &dev_attr_gro_flush_timeout.attr,
        &dev_attr_phys_port_id.attr,
        &dev_attr_phys_switch_id.attr,
        NULL,
};
ATTRIBUTE_GROUPS(net_class);

/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
                            struct device_attribute *attr, char *buf,
                            unsigned long offset)
{
        struct net_device *dev = to_net_dev(d);
        ssize_t ret = -EINVAL;

        WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
                        offset % sizeof(u64) != 0);

        read_lock(&dev_base_lock);
        if (dev_isalive(dev)) {
                struct rtnl_link_stats64 temp;
                const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

                ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
        }
        read_unlock(&dev_base_lock);
        return ret;
}

/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)                                             \
static ssize_t name##_show(struct device *d,                            \
                           struct device_attribute *attr, char *buf)    \
{                                                                       \
        return netstat_show(d, attr, buf,                               \
                            offsetof(struct rtnl_link_stats64, name));  \
}                                                                       \
static DEVICE_ATTR_RO(name)

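/*
 * Example expansion (a sketch): NETSTAT_ENTRY(rx_packets) defines
 * rx_packets_show(), which prints the u64 located at
 * offsetof(struct rtnl_link_stats64, rx_packets) in the device's
 * statistics snapshot.
 */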
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);

static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
        &dev_attr_tx_packets.attr,
        &dev_attr_rx_bytes.attr,
        &dev_attr_tx_bytes.attr,
        &dev_attr_rx_errors.attr,
        &dev_attr_tx_errors.attr,
        &dev_attr_rx_dropped.attr,
        &dev_attr_tx_dropped.attr,
        &dev_attr_multicast.attr,
        &dev_attr_collisions.attr,
        &dev_attr_rx_length_errors.attr,
        &dev_attr_rx_over_errors.attr,
        &dev_attr_rx_crc_errors.attr,
        &dev_attr_rx_frame_errors.attr,
        &dev_attr_rx_fifo_errors.attr,
        &dev_attr_rx_missed_errors.attr,
        &dev_attr_tx_aborted_errors.attr,
        &dev_attr_tx_carrier_errors.attr,
        &dev_attr_tx_fifo_errors.attr,
        &dev_attr_tx_heartbeat_errors.attr,
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
        NULL
};

static struct attribute_group netstat_group = {
        .name  = "statistics",
        .attrs  = netstat_attrs,
};

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
        NULL
};

static struct attribute_group wireless_group = {
        .name = "wireless",
        .attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups        NULL
#endif /* CONFIG_SYSFS */

#ifdef CONFIG_SYSFS
#define to_rx_queue_attr(_attr) container_of(_attr,             \
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)

static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
                                  char *buf)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
                                   const char *buf, size_t count)
{
        struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
        struct netdev_rx_queue *queue = to_rx_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
        .show = rx_queue_attr_show,
        .store = rx_queue_attr_store,
};

#ifdef CONFIG_RPS
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
                            struct rx_queue_attribute *attribute, char *buf)
{
        struct rps_map *map;
        cpumask_var_t mask;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        rcu_read_lock();
        map = rcu_dereference(queue->rps_map);
        if (map)
                for (i = 0; i < map->len; i++)
                        cpumask_set_cpu(map->cpus[i], mask);

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                rcu_read_unlock();
                free_cpumask_var(mask);
                return -EINVAL;
        }
        rcu_read_unlock();

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

static ssize_t store_rps_map(struct netdev_rx_queue *queue,
                             struct rx_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct rps_map *old_map, *map;
        cpumask_var_t mask;
        int err, cpu, i;
        static DEFINE_SPINLOCK(rps_map_lock);

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        map = kzalloc(max_t(unsigned int,
            RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
            GFP_KERNEL);
        if (!map) {
                free_cpumask_var(mask);
                return -ENOMEM;
        }

        i = 0;
        for_each_cpu_and(cpu, mask, cpu_online_mask)
                map->cpus[i++] = cpu;

        if (i) {
                map->len = i;
        } else {
                kfree(map);
                map = NULL;
        }

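        /*
         * Publish the new map under the spinlock so that concurrent
         * writers serialize; readers are lockless RCU, so the old map
         * may only be freed after a grace period (kfree_rcu below).
         */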
        spin_lock(&rps_map_lock);
        old_map = rcu_dereference_protected(queue->rps_map,
                                            lockdep_is_held(&rps_map_lock));
        rcu_assign_pointer(queue->rps_map, map);
        spin_unlock(&rps_map_lock);

        if (map)
                static_key_slow_inc(&rps_needed);
        if (old_map) {
                kfree_rcu(old_map, rcu);
                static_key_slow_dec(&rps_needed);
        }
        free_cpumask_var(mask);
        return len;
}
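
/*
 * Example (a sketch; device and queue names assumed):
 *   echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 * steers receive packet processing for rx-0 onto CPUs 0-3.
 */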

static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                           struct rx_queue_attribute *attr,
                                           char *buf)
{
        struct rps_dev_flow_table *flow_table;
        unsigned long val = 0;

        rcu_read_lock();
        flow_table = rcu_dereference(queue->rps_flow_table);
        if (flow_table)
                val = (unsigned long)flow_table->mask + 1;
        rcu_read_unlock();

        return sprintf(buf, "%lu\n", val);
}

static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
        struct rps_dev_flow_table *table = container_of(rcu,
            struct rps_dev_flow_table, rcu);

        vfree(table);
}

static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
                                            struct rx_queue_attribute *attr,
                                            const char *buf, size_t len)
{
        unsigned long mask, count;
        struct rps_dev_flow_table *table, *old_table;
        static DEFINE_SPINLOCK(rps_dev_flow_lock);
        int rc;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        rc = kstrtoul(buf, 0, &count);
        if (rc < 0)
                return rc;

        if (count) {
                mask = count - 1;
                /* mask = roundup_pow_of_two(count) - 1;
                 * without overflows...
                 */
                while ((mask | (mask >> 1)) != mask)
                        mask |= (mask >> 1);
                /* On 64 bit arches, must check mask fits in table->mask (u32),
                 * and on 32bit arches, must check
                 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
                 */
#if BITS_PER_LONG > 32
                if (mask > (unsigned long)(u32)mask)
                        return -EINVAL;
#else
                if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
                                / sizeof(struct rps_dev_flow)) {
                        /* Enforce a limit to prevent overflow */
                        return -EINVAL;
                }
#endif
                table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
                if (!table)
                        return -ENOMEM;

                table->mask = mask;
                for (count = 0; count <= mask; count++)
                        table->flows[count].cpu = RPS_NO_CPU;
        } else {
                table = NULL;
        }

        spin_lock(&rps_dev_flow_lock);
        old_table = rcu_dereference_protected(queue->rps_flow_table,
                                              lockdep_is_held(&rps_dev_flow_lock));
        rcu_assign_pointer(queue->rps_flow_table, table);
        spin_unlock(&rps_dev_flow_lock);

        if (old_table)
                call_rcu(&old_table->rcu, rps_dev_flow_table_release);

        return len;
}
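
/*
 * Example (a sketch; the path is assumed):
 *   echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 * sizes this queue's flow table for accelerated RFS; the bit-smearing
 * loop above rounds the count up to a power of two.
 */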

static struct rx_queue_attribute rps_cpus_attribute =
        __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
        __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
            show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
#endif /* CONFIG_RPS */

static struct attribute *rx_queue_default_attrs[] = {
#ifdef CONFIG_RPS
        &rps_cpus_attribute.attr,
        &rps_dev_flow_table_cnt_attribute.attr,
#endif
        NULL
};

static void rx_queue_release(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
#ifdef CONFIG_RPS
        struct rps_map *map;
        struct rps_dev_flow_table *flow_table;

        map = rcu_dereference_protected(queue->rps_map, 1);
        if (map) {
                RCU_INIT_POINTER(queue->rps_map, NULL);
                kfree_rcu(map, rcu);
        }

        flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
        if (flow_table) {
                RCU_INIT_POINTER(queue->rps_flow_table, NULL);
                call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
        }
#endif

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static const void *rx_queue_namespace(struct kobject *kobj)
{
        struct netdev_rx_queue *queue = to_rx_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}

static struct kobj_type rx_queue_ktype = {
        .sysfs_ops = &rx_queue_sysfs_ops,
        .release = rx_queue_release,
        .default_attrs = rx_queue_default_attrs,
        .namespace = rx_queue_namespace
};

static int rx_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_rx_queue *queue = dev->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
            "rx-%u", index);
        if (error)
                goto exit;

        if (dev->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error)
                        goto exit;
        }

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return error;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */

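/*
 * Grow or shrink the rx-<n> kobjects to match a new real queue count.
 * If adding a queue fails part-way, everything beyond the old count is
 * rolled back.
 */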
int
net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

#ifndef CONFIG_RPS
        if (!dev->sysfs_rx_queue_group)
                return 0;
#endif
        for (i = old_num; i < new_num; i++) {
                error = rx_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                if (dev->sysfs_rx_queue_group)
                        sysfs_remove_group(&dev->_rx[i].kobj,
                                           dev->sysfs_rx_queue_group);
                kobject_put(&dev->_rx[i].kobj);
        }

        return error;
#else
        return 0;
#endif
}

#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
        struct attribute attr;
        ssize_t (*show)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, char *buf);
        ssize_t (*store)(struct netdev_queue *queue,
            struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,         \
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)

static ssize_t netdev_queue_attr_show(struct kobject *kobj,
                                      struct attribute *attr, char *buf)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->show)
                return -EIO;

        return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
                                       struct attribute *attr,
                                       const char *buf, size_t count)
{
        struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
        struct netdev_queue *queue = to_netdev_queue(kobj);

        if (!attribute->store)
                return -EIO;

        return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
        .show = netdev_queue_attr_show,
        .store = netdev_queue_attr_store,
};

static ssize_t show_trans_timeout(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attribute,
                                  char *buf)
{
        unsigned long trans_timeout;

        spin_lock_irq(&queue->_xmit_lock);
        trans_timeout = queue->trans_timeout;
        spin_unlock_irq(&queue->_xmit_lock);

        /* terminate with a newline, like every other attribute here */
        return sprintf(buf, "%lu\n", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
        __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);

#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
        return sprintf(buf, "%u\n", value);
}

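/* Parse a BQL limit; the literal string "max" selects DQL_MAX_LIMIT. */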
static ssize_t bql_set(const char *buf, const size_t count,
                       unsigned int *pvalue)
{
        unsigned int value;
        int err;

        if (!strcmp(buf, "max") || !strcmp(buf, "max\n")) {
                value = DQL_MAX_LIMIT;
        } else {
                err = kstrtouint(buf, 10, &value);
                if (err < 0)
                        return err;
                if (value > DQL_MAX_LIMIT)
                        return -EINVAL;
        }

        *pvalue = value;

        return count;
}

static ssize_t bql_show_hold_time(struct netdev_queue *queue,
                                  struct netdev_queue_attribute *attr,
                                  char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attribute,
                                 const char *buf, size_t len)
{
        struct dql *dql = &queue->dql;
        unsigned int value;
        int err;

        err = kstrtouint(buf, 10, &value);
        if (err < 0)
                return err;

        dql->slack_hold_time = msecs_to_jiffies(value);

        return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
        __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
            bql_set_hold_time);

static ssize_t bql_show_inflight(struct netdev_queue *queue,
                                 struct netdev_queue_attribute *attr,
                                 char *buf)
{
        struct dql *dql = &queue->dql;

        return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
        __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);

#define BQL_ATTR(NAME, FIELD)                                           \
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
                                 struct netdev_queue_attribute *attr,   \
                                 char *buf)                             \
{                                                                       \
        return bql_show(buf, queue->dql.FIELD);                         \
}                                                                       \
                                                                        \
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
                                struct netdev_queue_attribute *attr,    \
                                const char *buf, size_t len)            \
{                                                                       \
        return bql_set(buf, len, &queue->dql.FIELD);                    \
}                                                                       \
                                                                        \
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =       \
        __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
            bql_set_ ## NAME);

BQL_ATTR(limit, limit)
BQL_ATTR(limit_max, max_limit)
BQL_ATTR(limit_min, min_limit)

static struct attribute *dql_attrs[] = {
        &bql_limit_attribute.attr,
        &bql_limit_max_attribute.attr,
        &bql_limit_min_attribute.attr,
        &bql_hold_time_attribute.attr,
        &bql_inflight_attribute.attr,
        NULL
};

static struct attribute_group dql_group = {
        .name  = "byte_queue_limits",
        .attrs  = dql_attrs,
};
#endif /* CONFIG_BQL */

#ifdef CONFIG_XPS
static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
        struct net_device *dev = queue->dev;
        unsigned int i;

        i = queue - dev->_tx;
        BUG_ON(i >= dev->num_tx_queues);

        return i;
}

static ssize_t show_xps_map(struct netdev_queue *queue,
                            struct netdev_queue_attribute *attribute, char *buf)
{
        struct net_device *dev = queue->dev;
        struct xps_dev_maps *dev_maps;
        cpumask_var_t mask;
        unsigned long index;
        size_t len = 0;
        int i;

        if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        rcu_read_lock();
        dev_maps = rcu_dereference(dev->xps_maps);
        if (dev_maps) {
                for_each_possible_cpu(i) {
                        struct xps_map *map =
                            rcu_dereference(dev_maps->cpu_map[i]);
                        if (map) {
                                int j;

                                for (j = 0; j < map->len; j++) {
                                        if (map->queues[j] == index) {
                                                cpumask_set_cpu(i, mask);
                                                break;
                                        }
                                }
                        }
                }
        }
        rcu_read_unlock();

        len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
        if (PAGE_SIZE - len < 3) {
                free_cpumask_var(mask);
                return -EINVAL;
        }

        free_cpumask_var(mask);
        len += sprintf(buf + len, "\n");
        return len;
}

static ssize_t store_xps_map(struct netdev_queue *queue,
                             struct netdev_queue_attribute *attribute,
                             const char *buf, size_t len)
{
        struct net_device *dev = queue->dev;
        unsigned long index;
        cpumask_var_t mask;
        int err;

        if (!capable(CAP_NET_ADMIN))
                return -EPERM;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        index = get_netdev_queue_index(queue);

        err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
        if (err) {
                free_cpumask_var(mask);
                return err;
        }

        err = netif_set_xps_queue(dev, mask, index);

        free_cpumask_var(mask);

        return err ? : len;
}
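
/*
 * Example (a sketch; device and queue names assumed):
 *   echo 3 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 * lets CPUs 0 and 1 select queue tx-0 when transmitting.
 */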

static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */

static struct attribute *netdev_queue_default_attrs[] = {
        &queue_trans_timeout.attr,
#ifdef CONFIG_XPS
        &xps_cpus_attribute.attr,
#endif
        NULL
};

static void netdev_queue_release(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);

        memset(kobj, 0, sizeof(*kobj));
        dev_put(queue->dev);
}

static const void *netdev_queue_namespace(struct kobject *kobj)
{
        struct netdev_queue *queue = to_netdev_queue(kobj);
        struct device *dev = &queue->dev->dev;
        const void *ns = NULL;

        if (dev->class && dev->class->ns_type)
                ns = dev->class->namespace(dev);

        return ns;
}

static struct kobj_type netdev_queue_ktype = {
        .sysfs_ops = &netdev_queue_sysfs_ops,
        .release = netdev_queue_release,
        .default_attrs = netdev_queue_default_attrs,
        .namespace = netdev_queue_namespace,
};

static int netdev_queue_add_kobject(struct net_device *dev, int index)
{
        struct netdev_queue *queue = dev->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;

        kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
        if (error)
                goto exit;

#ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error)
                goto exit;
#endif

        kobject_uevent(kobj, KOBJ_ADD);
        dev_hold(queue->dev);

        return 0;
exit:
        kobject_put(kobj);
        return error;
}
#endif /* CONFIG_SYSFS */

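/*
 * As with the rx path above: grow or shrink the tx-<n> kobjects (and
 * their BQL groups) to match the new real queue count, rolling back on
 * partial failure.
 */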
int
netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
        int i;
        int error = 0;

        for (i = old_num; i < new_num; i++) {
                error = netdev_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
                }
        }

        while (--i >= new_num) {
                struct netdev_queue *queue = dev->_tx + i;

#ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
#endif
                kobject_put(&queue->kobj);
        }

        return error;
#else
        return 0;
#endif /* CONFIG_SYSFS */
}

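/*
 * Create the per-device "queues" kset and populate it with kobjects for
 * the current real rx/tx queue counts, unwinding everything on failure.
 */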
static int register_queue_kobjects(struct net_device *dev)
{
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        dev->queues_kset = kset_create_and_add("queues",
            NULL, &dev->dev.kobj);
        if (!dev->queues_kset)
                return -ENOMEM;
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        error = net_rx_queue_update_kobjects(dev, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;

        error = netdev_queue_update_kobjects(dev, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;

        return 0;

error:
        netdev_queue_update_kobjects(dev, txq, 0);
        net_rx_queue_update_kobjects(dev, rxq, 0);
        return error;
}

static void remove_queue_kobjects(struct net_device *dev)
{
        int real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
        real_rx = dev->real_num_rx_queues;
#endif
        real_tx = dev->real_num_tx_queues;

        net_rx_queue_update_kobjects(dev, real_rx, 0);
        netdev_queue_update_kobjects(dev, real_tx, 0);
#ifdef CONFIG_SYSFS
        kset_unregister(dev->queues_kset);
#endif
}

static bool net_current_may_mount(void)
{
        struct net *net = current->nsproxy->net_ns;

        return ns_capable(net->user_ns, CAP_SYS_ADMIN);
}

static void *net_grab_current_ns(void)
{
        struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
        if (ns)
                atomic_inc(&ns->passive);
#endif
        return ns;
}

static const void *net_initial_ns(void)
{
        return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
        return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
        .type = KOBJ_NS_TYPE_NET,
        .current_may_mount = net_current_may_mount,
        .grab_current_ns = net_grab_current_ns,
        .netlink_ns = net_netlink_ns,
        .initial_ns = net_initial_ns,
        .drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);

static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
        struct net_device *dev = to_net_dev(d);
        int retval;

        /* pass interface to uevent. */
        retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
        if (retval)
                goto exit;

        /* pass ifindex to uevent.
         * ifindex is useful as it won't change (interface name may change)
         * and is what RtNetlink uses natively. */
        retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
        return retval;
}

/*
 *      netdev_release -- destroy and free a dead device.
 *      Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
        struct net_device *dev = to_net_dev(d);

        BUG_ON(dev->reg_state != NETREG_RELEASED);

        kfree(dev->ifalias);
        netdev_freemem(dev);
}

static const void *net_namespace(struct device *d)
{
        struct net_device *dev;

        dev = container_of(d, struct net_device, dev);
        return dev_net(dev);
}

static struct class net_class = {
        .name = "net",
        .dev_release = netdev_release,
        .dev_groups = net_class_groups,
        .dev_uevent = netdev_uevent,
        .ns_type = &net_ns_type_operations,
        .namespace = net_namespace,
};

/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *ndev)
{
        struct device *dev = &(ndev->dev);

        kobject_get(&dev->kobj);

        remove_queue_kobjects(ndev);

        pm_runtime_set_memalloc_noio(dev, false);

        device_del(dev);
}

/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *ndev)
{
        struct device *dev = &(ndev->dev);
        const struct attribute_group **groups = ndev->sysfs_groups;
        int error = 0;

        device_initialize(dev);
        dev->class = &net_class;
        dev->platform_data = ndev;
        dev->groups = groups;

        dev_set_name(dev, "%s", ndev->name);

#ifdef CONFIG_SYSFS
        /* Allow for a device specific group */
        if (*groups)
                groups++;

        *groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
        if (ndev->ieee80211_ptr)
                *groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
        else if (ndev->wireless_handlers)
                *groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

        error = device_add(dev);
        if (error)
                return error;

        error = register_queue_kobjects(ndev);
        if (error) {
                device_del(dev);
                return error;
        }

        pm_runtime_set_memalloc_noio(dev, true);

        return error;
}

int netdev_class_create_file_ns(struct class_attribute *class_attr,
                                const void *ns)
{
        return class_create_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_create_file_ns);

void netdev_class_remove_file_ns(struct class_attribute *class_attr,
                                 const void *ns)
{
        class_remove_file_ns(&net_class, class_attr, ns);
}
EXPORT_SYMBOL(netdev_class_remove_file_ns);

int __init netdev_kobject_init(void)
{
        kobj_ns_type_register(&net_ns_type_operations);
        return class_register(&net_class);
}