1 /*
2  * net-sysfs.c - network device class and attributes
3  *
4  * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
5  *
6  *      This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  */
11
12 #include <linux/capability.h>
13 #include <linux/kernel.h>
14 #include <linux/netdevice.h>
15 #include <linux/if_arp.h>
16 #include <linux/slab.h>
17 #include <linux/nsproxy.h>
18 #include <net/sock.h>
19 #include <net/net_namespace.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/wireless.h>
22 #include <linux/vmalloc.h>
23 #include <linux/export.h>
24 #include <linux/jiffies.h>
25 #include <net/wext.h>
26
27 #include "net-sysfs.h"
28
29 #ifdef CONFIG_SYSFS
30 static const char fmt_hex[] = "%#x\n";
31 static const char fmt_long_hex[] = "%#lx\n";
32 static const char fmt_dec[] = "%d\n";
33 static const char fmt_udec[] = "%u\n";
34 static const char fmt_ulong[] = "%lu\n";
35 static const char fmt_u64[] = "%llu\n";
36
37 static inline int dev_isalive(const struct net_device *dev)
38 {
39         return dev->reg_state <= NETREG_REGISTERED;
40 }
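/*
 * Note: reg_state values at or below NETREG_REGISTERED cover devices that are
 * still being set up as well as fully registered ones; anything beyond that is
 * somewhere in the unregister path and is treated as dead by the attribute
 * handlers below.
 */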
41
42 /* use same locking rules as GIF* ioctls */
43 static ssize_t netdev_show(const struct device *dev,
44                            struct device_attribute *attr, char *buf,
45                            ssize_t (*format)(const struct net_device *, char *))
46 {
47         struct net_device *net = to_net_dev(dev);
48         ssize_t ret = -EINVAL;
49
50         read_lock(&dev_base_lock);
51         if (dev_isalive(net))
52                 ret = (*format)(net, buf);
53         read_unlock(&dev_base_lock);
54
55         return ret;
56 }
57
58 /* generate a show function for a simple field */
59 #define NETDEVICE_SHOW(field, format_string)                            \
60 static ssize_t format_##field(const struct net_device *net, char *buf)  \
61 {                                                                       \
62         return sprintf(buf, format_string, net->field);                 \
63 }                                                                       \
64 static ssize_t show_##field(struct device *dev,                         \
65                             struct device_attribute *attr, char *buf)   \
66 {                                                                       \
67         return netdev_show(dev, attr, buf, format_##field);             \
68 }
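/*
 * For illustration, NETDEVICE_SHOW(mtu, fmt_dec) further down expands to
 * roughly:
 *
 *      static ssize_t format_mtu(const struct net_device *net, char *buf)
 *      {
 *              return sprintf(buf, "%d\n", net->mtu);
 *      }
 *      static ssize_t show_mtu(struct device *dev,
 *                              struct device_attribute *attr, char *buf)
 *      {
 *              return netdev_show(dev, attr, buf, format_mtu);
 *      }
 */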
69
70
71 /* use same locking and permission rules as SIF* ioctls */
72 static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
73                             const char *buf, size_t len,
74                             int (*set)(struct net_device *, unsigned long))
75 {
76         struct net_device *net = to_net_dev(dev);
77         unsigned long new;
78         int ret = -EINVAL;
79
80         if (!capable(CAP_NET_ADMIN))
81                 return -EPERM;
82
83         ret = kstrtoul(buf, 0, &new);
84         if (ret)
85                 goto err;
86
87         if (!rtnl_trylock())
88                 return restart_syscall();
89
90         if (dev_isalive(net)) {
91                 if ((ret = (*set)(net, new)) == 0)
92                         ret = len;
93         }
94         rtnl_unlock();
95  err:
96         return ret;
97 }
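/*
 * rtnl_trylock()/restart_syscall() is used here rather than rtnl_lock(): if
 * the rtnl mutex is contended, the write(2) is bounced back to userspace and
 * transparently restarted instead of sleeping on the lock while the sysfs
 * file is held active, which could otherwise deadlock against paths that
 * take the rtnl lock and then tear down these sysfs entries.
 */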
98
99 NETDEVICE_SHOW(dev_id, fmt_hex);
100 NETDEVICE_SHOW(addr_assign_type, fmt_dec);
101 NETDEVICE_SHOW(addr_len, fmt_dec);
102 NETDEVICE_SHOW(iflink, fmt_dec);
103 NETDEVICE_SHOW(ifindex, fmt_dec);
104 NETDEVICE_SHOW(type, fmt_dec);
105 NETDEVICE_SHOW(link_mode, fmt_dec);
106
107 /* use same locking rules as the GIFHWADDR ioctl */
108 static ssize_t show_address(struct device *dev, struct device_attribute *attr,
109                             char *buf)
110 {
111         struct net_device *net = to_net_dev(dev);
112         ssize_t ret = -EINVAL;
113
114         read_lock(&dev_base_lock);
115         if (dev_isalive(net))
116                 ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
117         read_unlock(&dev_base_lock);
118         return ret;
119 }
120
121 static ssize_t show_broadcast(struct device *dev,
122                             struct device_attribute *attr, char *buf)
123 {
124         struct net_device *net = to_net_dev(dev);
125         if (dev_isalive(net))
126                 return sysfs_format_mac(buf, net->broadcast, net->addr_len);
127         return -EINVAL;
128 }
129
130 static ssize_t show_carrier(struct device *dev,
131                             struct device_attribute *attr, char *buf)
132 {
133         struct net_device *netdev = to_net_dev(dev);
134         if (netif_running(netdev)) {
135                 return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
136         }
137         return -EINVAL;
138 }
139
140 static ssize_t show_speed(struct device *dev,
141                           struct device_attribute *attr, char *buf)
142 {
143         struct net_device *netdev = to_net_dev(dev);
144         int ret = -EINVAL;
145
146         if (!rtnl_trylock())
147                 return restart_syscall();
148
149         if (netif_running(netdev)) {
150                 struct ethtool_cmd cmd;
151                 if (!__ethtool_get_settings(netdev, &cmd))
152                         ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
153         }
154         rtnl_unlock();
155         return ret;
156 }
157
158 static ssize_t show_duplex(struct device *dev,
159                            struct device_attribute *attr, char *buf)
160 {
161         struct net_device *netdev = to_net_dev(dev);
162         int ret = -EINVAL;
163
164         if (!rtnl_trylock())
165                 return restart_syscall();
166
167         if (netif_running(netdev)) {
168                 struct ethtool_cmd cmd;
169                 if (!__ethtool_get_settings(netdev, &cmd)) {
170                         const char *duplex;
171                         switch (cmd.duplex) {
172                         case DUPLEX_HALF:
173                                 duplex = "half";
174                                 break;
175                         case DUPLEX_FULL:
176                                 duplex = "full";
177                                 break;
178                         default:
179                                 duplex = "unknown";
180                                 break;
181                         }
182                         ret = sprintf(buf, "%s\n", duplex);
183                 }
184         }
185         rtnl_unlock();
186         return ret;
187 }
188
189 static ssize_t show_dormant(struct device *dev,
190                             struct device_attribute *attr, char *buf)
191 {
192         struct net_device *netdev = to_net_dev(dev);
193
194         if (netif_running(netdev))
195                 return sprintf(buf, fmt_dec, !!netif_dormant(netdev));
196
197         return -EINVAL;
198 }
199
200 static const char *const operstates[] = {
201         "unknown",
202         "notpresent", /* currently unused */
203         "down",
204         "lowerlayerdown",
205         "testing", /* currently unused */
206         "dormant",
207         "up"
208 };
209
210 static ssize_t show_operstate(struct device *dev,
211                               struct device_attribute *attr, char *buf)
212 {
213         const struct net_device *netdev = to_net_dev(dev);
214         unsigned char operstate;
215
216         read_lock(&dev_base_lock);
217         operstate = netdev->operstate;
218         if (!netif_running(netdev))
219                 operstate = IF_OPER_DOWN;
220         read_unlock(&dev_base_lock);
221
222         if (operstate >= ARRAY_SIZE(operstates))
223                 return -EINVAL; /* should not happen */
224
225         return sprintf(buf, "%s\n", operstates[operstate]);
226 }
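/*
 * The strings above correspond to the IF_OPER_* operational states from
 * RFC 2863; operstate is reported as IF_OPER_DOWN whenever the device is
 * not running, regardless of the value recorded in the device.
 */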
227
228 /* read-write attributes */
229 NETDEVICE_SHOW(mtu, fmt_dec);
230
231 static int change_mtu(struct net_device *net, unsigned long new_mtu)
232 {
233         return dev_set_mtu(net, (int) new_mtu);
234 }
235
236 static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
237                          const char *buf, size_t len)
238 {
239         return netdev_store(dev, attr, buf, len, change_mtu);
240 }
241
242 NETDEVICE_SHOW(flags, fmt_hex);
243
244 static int change_flags(struct net_device *net, unsigned long new_flags)
245 {
246         return dev_change_flags(net, (unsigned int) new_flags);
247 }
248
249 static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
250                            const char *buf, size_t len)
251 {
252         return netdev_store(dev, attr, buf, len, change_flags);
253 }
254
255 NETDEVICE_SHOW(tx_queue_len, fmt_ulong);
256
257 static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
258 {
259         net->tx_queue_len = new_len;
260         return 0;
261 }
262
263 static ssize_t store_tx_queue_len(struct device *dev,
264                                   struct device_attribute *attr,
265                                   const char *buf, size_t len)
266 {
267         return netdev_store(dev, attr, buf, len, change_tx_queue_len);
268 }
269
270 static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
271                              const char *buf, size_t len)
272 {
273         struct net_device *netdev = to_net_dev(dev);
274         size_t count = len;
275         ssize_t ret;
276
277         if (!capable(CAP_NET_ADMIN))
278                 return -EPERM;
279
280         /* ignore trailing newline */
281         if (len >  0 && buf[len - 1] == '\n')
282                 --count;
283
284         if (!rtnl_trylock())
285                 return restart_syscall();
286         ret = dev_set_alias(netdev, buf, count);
287         rtnl_unlock();
288
289         return ret < 0 ? ret : len;
290 }
291
292 static ssize_t show_ifalias(struct device *dev,
293                             struct device_attribute *attr, char *buf)
294 {
295         const struct net_device *netdev = to_net_dev(dev);
296         ssize_t ret = 0;
297
298         if (!rtnl_trylock())
299                 return restart_syscall();
300         if (netdev->ifalias)
301                 ret = sprintf(buf, "%s\n", netdev->ifalias);
302         rtnl_unlock();
303         return ret;
304 }
305
306 NETDEVICE_SHOW(group, fmt_dec);
307
308 static int change_group(struct net_device *net, unsigned long new_group)
309 {
310         dev_set_group(net, (int) new_group);
311         return 0;
312 }
313
314 static ssize_t store_group(struct device *dev, struct device_attribute *attr,
315                          const char *buf, size_t len)
316 {
317         return netdev_store(dev, attr, buf, len, change_group);
318 }
319
320 static struct device_attribute net_class_attributes[] = {
321         __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
322         __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
323         __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
324         __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
325         __ATTR(iflink, S_IRUGO, show_iflink, NULL),
326         __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
327         __ATTR(type, S_IRUGO, show_type, NULL),
328         __ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
329         __ATTR(address, S_IRUGO, show_address, NULL),
330         __ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
331         __ATTR(carrier, S_IRUGO, show_carrier, NULL),
332         __ATTR(speed, S_IRUGO, show_speed, NULL),
333         __ATTR(duplex, S_IRUGO, show_duplex, NULL),
334         __ATTR(dormant, S_IRUGO, show_dormant, NULL),
335         __ATTR(operstate, S_IRUGO, show_operstate, NULL),
336         __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
337         __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
338         __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
339                store_tx_queue_len),
340         __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
341         {}
342 };
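/*
 * These per-netdevice attributes appear directly under /sys/class/net/<dev>/
 * once the device has been registered via netdev_register_kobject() below.
 */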
343
344 /* Show a given attribute in the statistics group */
345 static ssize_t netstat_show(const struct device *d,
346                             struct device_attribute *attr, char *buf,
347                             unsigned long offset)
348 {
349         struct net_device *dev = to_net_dev(d);
350         ssize_t ret = -EINVAL;
351
352         WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
353                         offset % sizeof(u64) != 0);
354
355         read_lock(&dev_base_lock);
356         if (dev_isalive(dev)) {
357                 struct rtnl_link_stats64 temp;
358                 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
359
360                 ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
361         }
362         read_unlock(&dev_base_lock);
363         return ret;
364 }
365
366 /* generate a read-only statistics attribute */
367 #define NETSTAT_ENTRY(name)                                             \
368 static ssize_t show_##name(struct device *d,                            \
369                            struct device_attribute *attr, char *buf)    \
370 {                                                                       \
371         return netstat_show(d, attr, buf,                               \
372                             offsetof(struct rtnl_link_stats64, name));  \
373 }                                                                       \
374 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
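/*
 * As an example, NETSTAT_ENTRY(rx_packets) below defines show_rx_packets(),
 * which passes netstat_show() the byte offset of the rx_packets counter in
 * struct rtnl_link_stats64 and exposes it as a read-only "rx_packets" file
 * in the "statistics" group.
 */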
375
376 NETSTAT_ENTRY(rx_packets);
377 NETSTAT_ENTRY(tx_packets);
378 NETSTAT_ENTRY(rx_bytes);
379 NETSTAT_ENTRY(tx_bytes);
380 NETSTAT_ENTRY(rx_errors);
381 NETSTAT_ENTRY(tx_errors);
382 NETSTAT_ENTRY(rx_dropped);
383 NETSTAT_ENTRY(tx_dropped);
384 NETSTAT_ENTRY(multicast);
385 NETSTAT_ENTRY(collisions);
386 NETSTAT_ENTRY(rx_length_errors);
387 NETSTAT_ENTRY(rx_over_errors);
388 NETSTAT_ENTRY(rx_crc_errors);
389 NETSTAT_ENTRY(rx_frame_errors);
390 NETSTAT_ENTRY(rx_fifo_errors);
391 NETSTAT_ENTRY(rx_missed_errors);
392 NETSTAT_ENTRY(tx_aborted_errors);
393 NETSTAT_ENTRY(tx_carrier_errors);
394 NETSTAT_ENTRY(tx_fifo_errors);
395 NETSTAT_ENTRY(tx_heartbeat_errors);
396 NETSTAT_ENTRY(tx_window_errors);
397 NETSTAT_ENTRY(rx_compressed);
398 NETSTAT_ENTRY(tx_compressed);
399
400 static struct attribute *netstat_attrs[] = {
401         &dev_attr_rx_packets.attr,
402         &dev_attr_tx_packets.attr,
403         &dev_attr_rx_bytes.attr,
404         &dev_attr_tx_bytes.attr,
405         &dev_attr_rx_errors.attr,
406         &dev_attr_tx_errors.attr,
407         &dev_attr_rx_dropped.attr,
408         &dev_attr_tx_dropped.attr,
409         &dev_attr_multicast.attr,
410         &dev_attr_collisions.attr,
411         &dev_attr_rx_length_errors.attr,
412         &dev_attr_rx_over_errors.attr,
413         &dev_attr_rx_crc_errors.attr,
414         &dev_attr_rx_frame_errors.attr,
415         &dev_attr_rx_fifo_errors.attr,
416         &dev_attr_rx_missed_errors.attr,
417         &dev_attr_tx_aborted_errors.attr,
418         &dev_attr_tx_carrier_errors.attr,
419         &dev_attr_tx_fifo_errors.attr,
420         &dev_attr_tx_heartbeat_errors.attr,
421         &dev_attr_tx_window_errors.attr,
422         &dev_attr_rx_compressed.attr,
423         &dev_attr_tx_compressed.attr,
424         NULL
425 };
426
427
428 static struct attribute_group netstat_group = {
429         .name  = "statistics",
430         .attrs  = netstat_attrs,
431 };
432 #endif /* CONFIG_SYSFS */
433
434 #ifdef CONFIG_RPS
435 /*
436  * RX queue sysfs structures and functions.
437  */
438 struct rx_queue_attribute {
439         struct attribute attr;
440         ssize_t (*show)(struct netdev_rx_queue *queue,
441             struct rx_queue_attribute *attr, char *buf);
442         ssize_t (*store)(struct netdev_rx_queue *queue,
443             struct rx_queue_attribute *attr, const char *buf, size_t len);
444 };
445 #define to_rx_queue_attr(_attr) container_of(_attr,             \
446     struct rx_queue_attribute, attr)
447
448 #define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
449
450 static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
451                                   char *buf)
452 {
453         struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
454         struct netdev_rx_queue *queue = to_rx_queue(kobj);
455
456         if (!attribute->show)
457                 return -EIO;
458
459         return attribute->show(queue, attribute, buf);
460 }
461
462 static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
463                                    const char *buf, size_t count)
464 {
465         struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
466         struct netdev_rx_queue *queue = to_rx_queue(kobj);
467
468         if (!attribute->store)
469                 return -EIO;
470
471         return attribute->store(queue, attribute, buf, count);
472 }
473
474 static const struct sysfs_ops rx_queue_sysfs_ops = {
475         .show = rx_queue_attr_show,
476         .store = rx_queue_attr_store,
477 };
478
479 static ssize_t show_rps_map(struct netdev_rx_queue *queue,
480                             struct rx_queue_attribute *attribute, char *buf)
481 {
482         struct rps_map *map;
483         cpumask_var_t mask;
484         size_t len = 0;
485         int i;
486
487         if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
488                 return -ENOMEM;
489
490         rcu_read_lock();
491         map = rcu_dereference(queue->rps_map);
492         if (map)
493                 for (i = 0; i < map->len; i++)
494                         cpumask_set_cpu(map->cpus[i], mask);
495
496         len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
497         if (PAGE_SIZE - len < 3) {
498                 rcu_read_unlock();
499                 free_cpumask_var(mask);
500                 return -EINVAL;
501         }
502         rcu_read_unlock();
503
504         free_cpumask_var(mask);
505         len += sprintf(buf + len, "\n");
506         return len;
507 }
508
509 static ssize_t store_rps_map(struct netdev_rx_queue *queue,
510                       struct rx_queue_attribute *attribute,
511                       const char *buf, size_t len)
512 {
513         struct rps_map *old_map, *map;
514         cpumask_var_t mask;
515         int err, cpu, i;
516         static DEFINE_SPINLOCK(rps_map_lock);
517
518         if (!capable(CAP_NET_ADMIN))
519                 return -EPERM;
520
521         if (!alloc_cpumask_var(&mask, GFP_KERNEL))
522                 return -ENOMEM;
523
524         err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
525         if (err) {
526                 free_cpumask_var(mask);
527                 return err;
528         }
529
530         map = kzalloc(max_t(unsigned int,
531             RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
532             GFP_KERNEL);
533         if (!map) {
534                 free_cpumask_var(mask);
535                 return -ENOMEM;
536         }
537
538         i = 0;
539         for_each_cpu_and(cpu, mask, cpu_online_mask)
540                 map->cpus[i++] = cpu;
541
542         if (i)
543                 map->len = i;
544         else {
545                 kfree(map);
546                 map = NULL;
547         }
548
549         spin_lock(&rps_map_lock);
550         old_map = rcu_dereference_protected(queue->rps_map,
551                                             lockdep_is_held(&rps_map_lock));
552         rcu_assign_pointer(queue->rps_map, map);
553         spin_unlock(&rps_map_lock);
554
555         if (map)
556                 static_key_slow_inc(&rps_needed);
557         if (old_map) {
558                 kfree_rcu(old_map, rcu);
559                 static_key_slow_dec(&rps_needed);
560         }
561         free_cpumask_var(mask);
562         return len;
563 }
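/*
 * Example usage (the path follows the "queues" kset and "rx-%u" kobject
 * names registered later in this file; eth0 is just a placeholder name):
 *
 *      # process packets from RX queue 0 of eth0 on CPUs 0-3
 *      echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *
 * A non-empty mask installs a new rps_map and bumps the rps_needed static
 * key; an empty mask removes the per-queue map again.
 */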
564
565 static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
566                                            struct rx_queue_attribute *attr,
567                                            char *buf)
568 {
569         struct rps_dev_flow_table *flow_table;
570         unsigned long val = 0;
571
572         rcu_read_lock();
573         flow_table = rcu_dereference(queue->rps_flow_table);
574         if (flow_table)
575                 val = (unsigned long)flow_table->mask + 1;
576         rcu_read_unlock();
577
578         return sprintf(buf, "%lu\n", val);
579 }
580
581 static void rps_dev_flow_table_release_work(struct work_struct *work)
582 {
583         struct rps_dev_flow_table *table = container_of(work,
584             struct rps_dev_flow_table, free_work);
585
586         vfree(table);
587 }
588
589 static void rps_dev_flow_table_release(struct rcu_head *rcu)
590 {
591         struct rps_dev_flow_table *table = container_of(rcu,
592             struct rps_dev_flow_table, rcu);
593
594         INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
595         schedule_work(&table->free_work);
596 }
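/*
 * The two-step release above is needed because the flow table is allocated
 * with vmalloc(): RCU callbacks run in softirq context, where vfree() is not
 * safe to call directly, so the callback hands the actual free off to a
 * workqueue.
 */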
597
598 static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
599                                      struct rx_queue_attribute *attr,
600                                      const char *buf, size_t len)
601 {
602         unsigned long mask, count;
603         struct rps_dev_flow_table *table, *old_table;
604         static DEFINE_SPINLOCK(rps_dev_flow_lock);
605         int rc;
606
607         if (!capable(CAP_NET_ADMIN))
608                 return -EPERM;
609
610         rc = kstrtoul(buf, 0, &count);
611         if (rc < 0)
612                 return rc;
613
614         if (count) {
615                 mask = count - 1;
616                 /* mask = roundup_pow_of_two(count) - 1,
617                  * computed without risking an overflow.
618                  */
619                 while ((mask | (mask >> 1)) != mask)
620                         mask |= (mask >> 1);
621                 /* On 64-bit arches, must check mask fits in table->mask (u32),
622                  * and on 32-bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
623                  * doesn't overflow.
624                  */
625 #if BITS_PER_LONG > 32
626                 if (mask > (unsigned long)(u32)mask)
627                         return -EINVAL;
628 #else
629                 if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
630                                 / sizeof(struct rps_dev_flow)) {
631                         /* Enforce a limit to prevent overflow */
632                         return -EINVAL;
633                 }
634 #endif
635                 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
636                 if (!table)
637                         return -ENOMEM;
638
639                 table->mask = mask;
640                 for (count = 0; count <= mask; count++)
641                         table->flows[count].cpu = RPS_NO_CPU;
642         } else
643                 table = NULL;
644
645         spin_lock(&rps_dev_flow_lock);
646         old_table = rcu_dereference_protected(queue->rps_flow_table,
647                                               lockdep_is_held(&rps_dev_flow_lock));
648         rcu_assign_pointer(queue->rps_flow_table, table);
649         spin_unlock(&rps_dev_flow_lock);
650
651         if (old_table)
652                 call_rcu(&old_table->rcu, rps_dev_flow_table_release);
653
654         return len;
655 }
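/*
 * For example (hypothetical numbers): writing 100 to rps_flow_cnt rounds the
 * mask up to 127, so a table with 128 flow entries is allocated; writing 0
 * removes the table altogether.
 */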
656
657 static struct rx_queue_attribute rps_cpus_attribute =
658         __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
659
660
661 static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
662         __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
663             show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
664
665 static struct attribute *rx_queue_default_attrs[] = {
666         &rps_cpus_attribute.attr,
667         &rps_dev_flow_table_cnt_attribute.attr,
668         NULL
669 };
670
671 static void rx_queue_release(struct kobject *kobj)
672 {
673         struct netdev_rx_queue *queue = to_rx_queue(kobj);
674         struct rps_map *map;
675         struct rps_dev_flow_table *flow_table;
676
677
678         map = rcu_dereference_protected(queue->rps_map, 1);
679         if (map) {
680                 RCU_INIT_POINTER(queue->rps_map, NULL);
681                 kfree_rcu(map, rcu);
682         }
683
684         flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
685         if (flow_table) {
686                 RCU_INIT_POINTER(queue->rps_flow_table, NULL);
687                 call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
688         }
689
690         memset(kobj, 0, sizeof(*kobj));
691         dev_put(queue->dev);
692 }
693
694 static struct kobj_type rx_queue_ktype = {
695         .sysfs_ops = &rx_queue_sysfs_ops,
696         .release = rx_queue_release,
697         .default_attrs = rx_queue_default_attrs,
698 };
699
700 static int rx_queue_add_kobject(struct net_device *net, int index)
701 {
702         struct netdev_rx_queue *queue = net->_rx + index;
703         struct kobject *kobj = &queue->kobj;
704         int error = 0;
705
706         kobj->kset = net->queues_kset;
707         error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
708             "rx-%u", index);
709         if (error) {
710                 kobject_put(kobj);
711                 return error;
712         }
713
714         kobject_uevent(kobj, KOBJ_ADD);
715         dev_hold(queue->dev);
716
717         return error;
718 }
719 #endif /* CONFIG_RPS */
720
721 int
722 net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
723 {
724 #ifdef CONFIG_RPS
725         int i;
726         int error = 0;
727
728         for (i = old_num; i < new_num; i++) {
729                 error = rx_queue_add_kobject(net, i);
730                 if (error) {
731                         new_num = old_num;
732                         break;
733                 }
734         }
735
736         while (--i >= new_num)
737                 kobject_put(&net->_rx[i].kobj);
738
739         return error;
740 #else
741         return 0;
742 #endif
743 }
744
745 #ifdef CONFIG_SYSFS
746 /*
747  * netdev_queue sysfs structures and functions.
748  */
749 struct netdev_queue_attribute {
750         struct attribute attr;
751         ssize_t (*show)(struct netdev_queue *queue,
752             struct netdev_queue_attribute *attr, char *buf);
753         ssize_t (*store)(struct netdev_queue *queue,
754             struct netdev_queue_attribute *attr, const char *buf, size_t len);
755 };
756 #define to_netdev_queue_attr(_attr) container_of(_attr,         \
757     struct netdev_queue_attribute, attr)
758
759 #define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
760
761 static ssize_t netdev_queue_attr_show(struct kobject *kobj,
762                                       struct attribute *attr, char *buf)
763 {
764         struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
765         struct netdev_queue *queue = to_netdev_queue(kobj);
766
767         if (!attribute->show)
768                 return -EIO;
769
770         return attribute->show(queue, attribute, buf);
771 }
772
773 static ssize_t netdev_queue_attr_store(struct kobject *kobj,
774                                        struct attribute *attr,
775                                        const char *buf, size_t count)
776 {
777         struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
778         struct netdev_queue *queue = to_netdev_queue(kobj);
779
780         if (!attribute->store)
781                 return -EIO;
782
783         return attribute->store(queue, attribute, buf, count);
784 }
785
786 static const struct sysfs_ops netdev_queue_sysfs_ops = {
787         .show = netdev_queue_attr_show,
788         .store = netdev_queue_attr_store,
789 };
790
791 static ssize_t show_trans_timeout(struct netdev_queue *queue,
792                                   struct netdev_queue_attribute *attribute,
793                                   char *buf)
794 {
795         unsigned long trans_timeout;
796
797         spin_lock_irq(&queue->_xmit_lock);
798         trans_timeout = queue->trans_timeout;
799         spin_unlock_irq(&queue->_xmit_lock);
800
801         return sprintf(buf, "%lu\n", trans_timeout);
802 }
803
804 static struct netdev_queue_attribute queue_trans_timeout =
805         __ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
806
807 #ifdef CONFIG_BQL
808 /*
809  * Byte queue limits sysfs structures and functions.
810  */
811 static ssize_t bql_show(char *buf, unsigned int value)
812 {
813         return sprintf(buf, "%u\n", value);
814 }
815
816 static ssize_t bql_set(const char *buf, const size_t count,
817                        unsigned int *pvalue)
818 {
819         unsigned int value;
820         int err;
821
822         if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
823                 value = DQL_MAX_LIMIT;
824         else {
825                 err = kstrtouint(buf, 10, &value);
826                 if (err < 0)
827                         return err;
828                 if (value > DQL_MAX_LIMIT)
829                         return -EINVAL;
830         }
831
832         *pvalue = value;
833
834         return count;
835 }
836
837 static ssize_t bql_show_hold_time(struct netdev_queue *queue,
838                                   struct netdev_queue_attribute *attr,
839                                   char *buf)
840 {
841         struct dql *dql = &queue->dql;
842
843         return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
844 }
845
846 static ssize_t bql_set_hold_time(struct netdev_queue *queue,
847                                  struct netdev_queue_attribute *attribute,
848                                  const char *buf, size_t len)
849 {
850         struct dql *dql = &queue->dql;
851         unsigned int value;
852         int err;
853
854         err = kstrtouint(buf, 10, &value);
855         if (err < 0)
856                 return err;
857
858         dql->slack_hold_time = msecs_to_jiffies(value);
859
860         return len;
861 }
862
863 static struct netdev_queue_attribute bql_hold_time_attribute =
864         __ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
865             bql_set_hold_time);
866
867 static ssize_t bql_show_inflight(struct netdev_queue *queue,
868                                  struct netdev_queue_attribute *attr,
869                                  char *buf)
870 {
871         struct dql *dql = &queue->dql;
872
873         return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
874 }
875
876 static struct netdev_queue_attribute bql_inflight_attribute =
877         __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
878
879 #define BQL_ATTR(NAME, FIELD)                                           \
880 static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,            \
881                                  struct netdev_queue_attribute *attr,   \
882                                  char *buf)                             \
883 {                                                                       \
884         return bql_show(buf, queue->dql.FIELD);                         \
885 }                                                                       \
886                                                                         \
887 static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,             \
888                                 struct netdev_queue_attribute *attr,    \
889                                 const char *buf, size_t len)            \
890 {                                                                       \
891         return bql_set(buf, len, &queue->dql.FIELD);                    \
892 }                                                                       \
893                                                                         \
894 static struct netdev_queue_attribute bql_ ## NAME ## _attribute =       \
895         __ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,              \
896             bql_set_ ## NAME);
897
898 BQL_ATTR(limit, limit)
899 BQL_ATTR(limit_max, max_limit)
900 BQL_ATTR(limit_min, min_limit)
901
902 static struct attribute *dql_attrs[] = {
903         &bql_limit_attribute.attr,
904         &bql_limit_max_attribute.attr,
905         &bql_limit_min_attribute.attr,
906         &bql_hold_time_attribute.attr,
907         &bql_inflight_attribute.attr,
908         NULL
909 };
910
911 static struct attribute_group dql_group = {
912         .name  = "byte_queue_limits",
913         .attrs  = dql_attrs,
914 };
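/*
 * These files appear under
 * /sys/class/net/<dev>/queues/tx-<n>/byte_queue_limits/, following the
 * "queues" kset, the "tx-%u" kobject names and the group name above.
 * Writing the string "max" to limit, limit_max or limit_min selects
 * DQL_MAX_LIMIT, as handled by bql_set().
 */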
915 #endif /* CONFIG_BQL */
916
917 #ifdef CONFIG_XPS
918 static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
919 {
920         struct net_device *dev = queue->dev;
921         int i;
922
923         for (i = 0; i < dev->num_tx_queues; i++)
924                 if (queue == &dev->_tx[i])
925                         break;
926
927         BUG_ON(i >= dev->num_tx_queues);
928
929         return i;
930 }
931
932
933 static ssize_t show_xps_map(struct netdev_queue *queue,
934                             struct netdev_queue_attribute *attribute, char *buf)
935 {
936         struct net_device *dev = queue->dev;
937         struct xps_dev_maps *dev_maps;
938         cpumask_var_t mask;
939         unsigned long index;
940         size_t len = 0;
941         int i;
942
943         if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
944                 return -ENOMEM;
945
946         index = get_netdev_queue_index(queue);
947
948         rcu_read_lock();
949         dev_maps = rcu_dereference(dev->xps_maps);
950         if (dev_maps) {
951                 for_each_possible_cpu(i) {
952                         struct xps_map *map =
953                             rcu_dereference(dev_maps->cpu_map[i]);
954                         if (map) {
955                                 int j;
956                                 for (j = 0; j < map->len; j++) {
957                                         if (map->queues[j] == index) {
958                                                 cpumask_set_cpu(i, mask);
959                                                 break;
960                                         }
961                                 }
962                         }
963                 }
964         }
965         rcu_read_unlock();
966
967         len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
968         if (PAGE_SIZE - len < 3) {
969                 free_cpumask_var(mask);
970                 return -EINVAL;
971         }
972
973         free_cpumask_var(mask);
974         len += sprintf(buf + len, "\n");
975         return len;
976 }
977
978 static DEFINE_MUTEX(xps_map_mutex);
979 #define xmap_dereference(P)             \
980         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
981
982 static void xps_queue_release(struct netdev_queue *queue)
983 {
984         struct net_device *dev = queue->dev;
985         struct xps_dev_maps *dev_maps;
986         struct xps_map *map;
987         unsigned long index;
988         int i, pos, nonempty = 0;
989
990         index = get_netdev_queue_index(queue);
991
992         mutex_lock(&xps_map_mutex);
993         dev_maps = xmap_dereference(dev->xps_maps);
994
995         if (dev_maps) {
996                 for_each_possible_cpu(i) {
997                         map = xmap_dereference(dev_maps->cpu_map[i]);
998                         if (!map)
999                                 continue;
1000
1001                         for (pos = 0; pos < map->len; pos++)
1002                                 if (map->queues[pos] == index)
1003                                         break;
1004
1005                         if (pos < map->len) {
1006                                 if (map->len > 1)
1007                                         map->queues[pos] =
1008                                             map->queues[--map->len];
1009                                 else {
1010                                         RCU_INIT_POINTER(dev_maps->cpu_map[i],
1011                                             NULL);
1012                                         kfree_rcu(map, rcu);
1013                                         map = NULL;
1014                                 }
1015                         }
1016                         if (map)
1017                                 nonempty = 1;
1018                 }
1019
1020                 if (!nonempty) {
1021                         RCU_INIT_POINTER(dev->xps_maps, NULL);
1022                         kfree_rcu(dev_maps, rcu);
1023                 }
1024         }
1025         mutex_unlock(&xps_map_mutex);
1026 }
1027
1028 static ssize_t store_xps_map(struct netdev_queue *queue,
1029                       struct netdev_queue_attribute *attribute,
1030                       const char *buf, size_t len)
1031 {
1032         struct net_device *dev = queue->dev;
1033         cpumask_var_t mask;
1034         int err, i, cpu, pos, map_len, alloc_len, need_set;
1035         unsigned long index;
1036         struct xps_map *map, *new_map;
1037         struct xps_dev_maps *dev_maps, *new_dev_maps;
1038         int nonempty = 0;
1039         int numa_node_id = -2;
1040
1041         if (!capable(CAP_NET_ADMIN))
1042                 return -EPERM;
1043
1044         if (!alloc_cpumask_var(&mask, GFP_KERNEL))
1045                 return -ENOMEM;
1046
1047         index = get_netdev_queue_index(queue);
1048
1049         err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
1050         if (err) {
1051                 free_cpumask_var(mask);
1052                 return err;
1053         }
1054
1055         new_dev_maps = kzalloc(max_t(unsigned int,
1056             XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
1057         if (!new_dev_maps) {
1058                 free_cpumask_var(mask);
1059                 return -ENOMEM;
1060         }
1061
1062         mutex_lock(&xps_map_mutex);
1063
1064         dev_maps = xmap_dereference(dev->xps_maps);
1065
1066         for_each_possible_cpu(cpu) {
1067                 map = dev_maps ?
1068                         xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1069                 new_map = map;
1070                 if (map) {
1071                         for (pos = 0; pos < map->len; pos++)
1072                                 if (map->queues[pos] == index)
1073                                         break;
1074                         map_len = map->len;
1075                         alloc_len = map->alloc_len;
1076                 } else
1077                         pos = map_len = alloc_len = 0;
1078
1079                 need_set = cpumask_test_cpu(cpu, mask) && cpu_online(cpu);
1080 #ifdef CONFIG_NUMA
1081                 if (need_set) {
1082                         if (numa_node_id == -2)
1083                                 numa_node_id = cpu_to_node(cpu);
1084                         else if (numa_node_id != cpu_to_node(cpu))
1085                                 numa_node_id = -1;
1086                 }
1087 #endif
1088                 if (need_set && pos >= map_len) {
1089                         /* Need to add queue to this CPU's map */
1090                         if (map_len >= alloc_len) {
1091                                 alloc_len = alloc_len ?
1092                                     2 * alloc_len : XPS_MIN_MAP_ALLOC;
1093                                 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
1094                                                        GFP_KERNEL,
1095                                                        cpu_to_node(cpu));
1096                                 if (!new_map)
1097                                         goto error;
1098                                 new_map->alloc_len = alloc_len;
1099                                 for (i = 0; i < map_len; i++)
1100                                         new_map->queues[i] = map->queues[i];
1101                                 new_map->len = map_len;
1102                         }
1103                         new_map->queues[new_map->len++] = index;
1104                 } else if (!need_set && pos < map_len) {
1105                         /* Need to remove queue from this CPU's map */
1106                         if (map_len > 1)
1107                                 new_map->queues[pos] =
1108                                     new_map->queues[--new_map->len];
1109                         else
1110                                 new_map = NULL;
1111                 }
1112                 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
1113         }
1114
1115         /* Cleanup old maps */
1116         for_each_possible_cpu(cpu) {
1117                 map = dev_maps ?
1118                         xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
1119                 if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
1120                         kfree_rcu(map, rcu);
1121                 if (new_dev_maps->cpu_map[cpu])
1122                         nonempty = 1;
1123         }
1124
1125         if (nonempty) {
1126                 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1127         } else {
1128                 kfree(new_dev_maps);
1129                 RCU_INIT_POINTER(dev->xps_maps, NULL);
1130         }
1131
1132         if (dev_maps)
1133                 kfree_rcu(dev_maps, rcu);
1134
1135         netdev_queue_numa_node_write(queue, (numa_node_id >= 0) ? numa_node_id :
1136                                             NUMA_NO_NODE);
1137
1138         mutex_unlock(&xps_map_mutex);
1139
1140         free_cpumask_var(mask);
1141         return len;
1142
1143 error:
1144         mutex_unlock(&xps_map_mutex);
1145
1146         if (new_dev_maps)
1147                 for_each_possible_cpu(i)
1148                         kfree(rcu_dereference_protected(
1149                                 new_dev_maps->cpu_map[i],
1150                                 1));
1151         kfree(new_dev_maps);
1152         free_cpumask_var(mask);
1153         return -ENOMEM;
1154 }
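/*
 * Example (eth0 is a placeholder; the path follows the "tx-%u" queue
 * kobjects registered below):
 *
 *      # allow CPUs 0 and 1 to select TX queue 0
 *      echo 3 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 *
 * The store also records the NUMA node of the selected CPUs on the queue
 * (or NUMA_NO_NODE if they span several nodes) via
 * netdev_queue_numa_node_write().
 */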
1155
1156 static struct netdev_queue_attribute xps_cpus_attribute =
1157     __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
1158 #endif /* CONFIG_XPS */
1159
1160 static struct attribute *netdev_queue_default_attrs[] = {
1161         &queue_trans_timeout.attr,
1162 #ifdef CONFIG_XPS
1163         &xps_cpus_attribute.attr,
1164 #endif
1165         NULL
1166 };
1167
1168 static void netdev_queue_release(struct kobject *kobj)
1169 {
1170         struct netdev_queue *queue = to_netdev_queue(kobj);
1171
1172 #ifdef CONFIG_XPS
1173         xps_queue_release(queue);
1174 #endif
1175
1176         memset(kobj, 0, sizeof(*kobj));
1177         dev_put(queue->dev);
1178 }
1179
1180 static struct kobj_type netdev_queue_ktype = {
1181         .sysfs_ops = &netdev_queue_sysfs_ops,
1182         .release = netdev_queue_release,
1183         .default_attrs = netdev_queue_default_attrs,
1184 };
1185
1186 static int netdev_queue_add_kobject(struct net_device *net, int index)
1187 {
1188         struct netdev_queue *queue = net->_tx + index;
1189         struct kobject *kobj = &queue->kobj;
1190         int error = 0;
1191
1192         kobj->kset = net->queues_kset;
1193         error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
1194             "tx-%u", index);
1195         if (error)
1196                 goto exit;
1197
1198 #ifdef CONFIG_BQL
1199         error = sysfs_create_group(kobj, &dql_group);
1200         if (error)
1201                 goto exit;
1202 #endif
1203
1204         kobject_uevent(kobj, KOBJ_ADD);
1205         dev_hold(queue->dev);
1206
1207         return 0;
1208 exit:
1209         kobject_put(kobj);
1210         return error;
1211 }
1212 #endif /* CONFIG_SYSFS */
1213
1214 int
1215 netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
1216 {
1217 #ifdef CONFIG_SYSFS
1218         int i;
1219         int error = 0;
1220
1221         for (i = old_num; i < new_num; i++) {
1222                 error = netdev_queue_add_kobject(net, i);
1223                 if (error) {
1224                         new_num = old_num;
1225                         break;
1226                 }
1227         }
1228
1229         while (--i >= new_num) {
1230                 struct netdev_queue *queue = net->_tx + i;
1231
1232 #ifdef CONFIG_BQL
1233                 sysfs_remove_group(&queue->kobj, &dql_group);
1234 #endif
1235                 kobject_put(&queue->kobj);
1236         }
1237
1238         return error;
1239 #else
1240         return 0;
1241 #endif /* CONFIG_SYSFS */
1242 }
1243
1244 static int register_queue_kobjects(struct net_device *net)
1245 {
1246         int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
1247
1248 #ifdef CONFIG_SYSFS
1249         net->queues_kset = kset_create_and_add("queues",
1250             NULL, &net->dev.kobj);
1251         if (!net->queues_kset)
1252                 return -ENOMEM;
1253 #endif
1254
1255 #ifdef CONFIG_RPS
1256         real_rx = net->real_num_rx_queues;
1257 #endif
1258         real_tx = net->real_num_tx_queues;
1259
1260         error = net_rx_queue_update_kobjects(net, 0, real_rx);
1261         if (error)
1262                 goto error;
1263         rxq = real_rx;
1264
1265         error = netdev_queue_update_kobjects(net, 0, real_tx);
1266         if (error)
1267                 goto error;
1268         txq = real_tx;
1269
1270         return 0;
1271
1272 error:
1273         netdev_queue_update_kobjects(net, txq, 0);
1274         net_rx_queue_update_kobjects(net, rxq, 0);
1275         return error;
1276 }
1277
1278 static void remove_queue_kobjects(struct net_device *net)
1279 {
1280         int real_rx = 0, real_tx = 0;
1281
1282 #ifdef CONFIG_RPS
1283         real_rx = net->real_num_rx_queues;
1284 #endif
1285         real_tx = net->real_num_tx_queues;
1286
1287         net_rx_queue_update_kobjects(net, real_rx, 0);
1288         netdev_queue_update_kobjects(net, real_tx, 0);
1289 #ifdef CONFIG_SYSFS
1290         kset_unregister(net->queues_kset);
1291 #endif
1292 }
1293
1294 static void *net_grab_current_ns(void)
1295 {
1296         struct net *ns = current->nsproxy->net_ns;
1297 #ifdef CONFIG_NET_NS
1298         if (ns)
1299                 atomic_inc(&ns->passive);
1300 #endif
1301         return ns;
1302 }
1303
1304 static const void *net_initial_ns(void)
1305 {
1306         return &init_net;
1307 }
1308
1309 static const void *net_netlink_ns(struct sock *sk)
1310 {
1311         return sock_net(sk);
1312 }
1313
1314 struct kobj_ns_type_operations net_ns_type_operations = {
1315         .type = KOBJ_NS_TYPE_NET,
1316         .grab_current_ns = net_grab_current_ns,
1317         .netlink_ns = net_netlink_ns,
1318         .initial_ns = net_initial_ns,
1319         .drop_ns = net_drop_ns,
1320 };
1321 EXPORT_SYMBOL_GPL(net_ns_type_operations);
1322
1323 #ifdef CONFIG_HOTPLUG
1324 static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1325 {
1326         struct net_device *dev = to_net_dev(d);
1327         int retval;
1328
1329         /* pass interface to uevent. */
1330         retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
1331         if (retval)
1332                 goto exit;
1333
1334         /* pass ifindex to uevent.
1335          * ifindex is useful as it won't change (interface name may change)
1336          * and is what RtNetlink uses natively. */
1337         retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
1338
1339 exit:
1340         return retval;
1341 }
1342 #endif
1343
1344 /*
1345  *      netdev_release -- destroy and free a dead device.
1346  *      Called when last reference to device kobject is gone.
1347  */
1348 static void netdev_release(struct device *d)
1349 {
1350         struct net_device *dev = to_net_dev(d);
1351
1352         BUG_ON(dev->reg_state != NETREG_RELEASED);
1353
1354         kfree(dev->ifalias);
1355         kfree((char *)dev - dev->padded);
1356 }
1357
1358 static const void *net_namespace(struct device *d)
1359 {
1360         struct net_device *dev;
1361         dev = container_of(d, struct net_device, dev);
1362         return dev_net(dev);
1363 }
1364
1365 static struct class net_class = {
1366         .name = "net",
1367         .dev_release = netdev_release,
1368 #ifdef CONFIG_SYSFS
1369         .dev_attrs = net_class_attributes,
1370 #endif /* CONFIG_SYSFS */
1371 #ifdef CONFIG_HOTPLUG
1372         .dev_uevent = netdev_uevent,
1373 #endif
1374         .ns_type = &net_ns_type_operations,
1375         .namespace = net_namespace,
1376 };
1377
1378 /* Delete sysfs entries but hold kobject reference until after all
1379  * netdev references are gone.
1380  */
1381 void netdev_unregister_kobject(struct net_device * net)
1382 {
1383         struct device *dev = &(net->dev);
1384
1385         kobject_get(&dev->kobj);
1386
1387         remove_queue_kobjects(net);
1388
1389         device_del(dev);
1390 }
1391
1392 /* Create sysfs entries for network device. */
1393 int netdev_register_kobject(struct net_device *net)
1394 {
1395         struct device *dev = &(net->dev);
1396         const struct attribute_group **groups = net->sysfs_groups;
1397         int error = 0;
1398
1399         device_initialize(dev);
1400         dev->class = &net_class;
1401         dev->platform_data = net;
1402         dev->groups = groups;
1403
1404         dev_set_name(dev, "%s", net->name);
1405
1406 #ifdef CONFIG_SYSFS
1407         /* Allow for a device specific group */
1408         if (*groups)
1409                 groups++;
1410
1411         *groups++ = &netstat_group;
1412 #endif /* CONFIG_SYSFS */
1413
1414         error = device_add(dev);
1415         if (error)
1416                 return error;
1417
1418         error = register_queue_kobjects(net);
1419         if (error) {
1420                 device_del(dev);
1421                 return error;
1422         }
1423
1424         return error;
1425 }
1426
1427 int netdev_class_create_file(struct class_attribute *class_attr)
1428 {
1429         return class_create_file(&net_class, class_attr);
1430 }
1431 EXPORT_SYMBOL(netdev_class_create_file);
1432
1433 void netdev_class_remove_file(struct class_attribute *class_attr)
1434 {
1435         class_remove_file(&net_class, class_attr);
1436 }
1437 EXPORT_SYMBOL(netdev_class_remove_file);
1438
1439 int netdev_kobject_init(void)
1440 {
1441         kobj_ns_type_register(&net_ns_type_operations);
1442         return class_register(&net_class);
1443 }