/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/wireless.h>
#include <linux/vmalloc.h>
#include <net/wext.h>
#include "net-sysfs.h"

#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
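/*
 * Note: in the reg_state enum ordering, only NETREG_UNINITIALIZED and
 * NETREG_REGISTERED compare <= NETREG_REGISTERED, so "alive" below means
 * the device has not yet started unregistering.
 */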
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = (*format)(net, buf);
	read_unlock(&dev_base_lock);

	return ret;
}
/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *net, char *buf)	\
{									\
	return sprintf(buf, format_string, net->field);			\
}									\
static ssize_t show_##field(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}
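/*
 * Illustrative expansion (not compiled here): NETDEVICE_SHOW(mtu, fmt_dec)
 * generates
 *
 *	static ssize_t format_mtu(const struct net_device *net, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", net->mtu);
 *	}
 *	static ssize_t show_mtu(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_mtu);
 *	}
 */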
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *net = to_net_dev(dev);
	char *endp;
	unsigned long new;
	int ret = -EINVAL;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	new = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(net)) {
		if ((ret = (*set)(net, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
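/*
 * Writes arrive through sysfs; e.g. (illustrative, from user space):
 *
 *	echo 1500 > /sys/class/net/eth0/mtu
 *
 * reaches this helper with buf = "1500\n", and the set() callback
 * (change_mtu() below, for the mtu attribute) applies the parsed value
 * under the RTNL lock.
 */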
NETDEVICE_SHOW(dev_id, fmt_hex);
NETDEVICE_SHOW(addr_assign_type, fmt_dec);
NETDEVICE_SHOW(addr_len, fmt_dec);
NETDEVICE_SHOW(iflink, fmt_dec);
NETDEVICE_SHOW(ifindex, fmt_dec);
NETDEVICE_SHOW(features, fmt_hex);
NETDEVICE_SHOW(type, fmt_dec);
NETDEVICE_SHOW(link_mode, fmt_dec);
/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t show_address(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
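/*
 * Reading returns the formatted hardware address; e.g. (illustrative):
 *
 *	$ cat /sys/class/net/eth0/address
 *	00:16:3e:12:34:56
 */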
static ssize_t show_broadcast(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);
	if (dev_isalive(net))
		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
	return -EINVAL;
}
static ssize_t show_carrier(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}
static ssize_t show_speed(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!dev_ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static ssize_t show_duplex(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!dev_ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, "%s\n",
				      cmd.duplex ? "full" : "half");
	}
	rtnl_unlock();
	return ret;
}
static ssize_t show_dormant(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};
static ssize_t show_operstate(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
/* read-write attributes */
NETDEVICE_SHOW(mtu, fmt_dec);

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
	return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW(flags, fmt_hex);

static int change_flags(struct net_device *net, unsigned long new_flags)
{
	return dev_change_flags(net, (unsigned) new_flags);
}

static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW(tx_queue_len, fmt_ulong);

static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
	net->tx_queue_len = new_len;
	return 0;
}

static ssize_t store_tx_queue_len(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	size_t count = len;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}
static ssize_t show_ifalias(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
NETDEVICE_SHOW(group, fmt_dec);

static int change_group(struct net_device *net, unsigned long new_group)
{
	dev_set_group(net, (int) new_group);
	return 0;
}

static ssize_t store_group(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
static struct device_attribute net_class_attributes[] = {
	__ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
	__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
	__ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
	__ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
	__ATTR(iflink, S_IRUGO, show_iflink, NULL),
	__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
	__ATTR(features, S_IRUGO, show_features, NULL),
	__ATTR(type, S_IRUGO, show_type, NULL),
	__ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
	__ATTR(address, S_IRUGO, show_address, NULL),
	__ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
	__ATTR(carrier, S_IRUGO, show_carrier, NULL),
	__ATTR(speed, S_IRUGO, show_speed, NULL),
	__ATTR(duplex, S_IRUGO, show_duplex, NULL),
	__ATTR(dormant, S_IRUGO, show_dormant, NULL),
	__ATTR(operstate, S_IRUGO, show_operstate, NULL),
	__ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
	__ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
	__ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
	       store_tx_queue_len),
	__ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
	{}
};
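/*
 * Each entry above appears as a file directly under the device's class
 * directory, e.g. (illustrative) /sys/class/net/eth0/mtu and
 * /sys/class/net/eth0/operstate.
 */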
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}
/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t show_##name(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};
static struct attribute_group netstat_group = {
	.name = "statistics",
	.attrs = netstat_attrs,
};
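/*
 * The group appears as a "statistics" subdirectory; e.g. (illustrative):
 *
 *	$ cat /sys/class/net/eth0/statistics/rx_packets
 *	123456
 */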
#ifdef CONFIG_WIRELESS_EXT_SYSFS
/* helper function that does all the locking etc for wireless stats */
static ssize_t wireless_show(struct device *d, char *buf,
			     ssize_t (*format)(const struct iw_statistics *,
					       char *))
{
	struct net_device *dev = to_net_dev(d);
	const struct iw_statistics *iw;
	ssize_t ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();
	if (dev_isalive(dev)) {
		iw = get_wireless_stats(dev);
		if (iw)
			ret = (*format)(iw, buf);
	}
	rtnl_unlock();

	return ret;
}
/* show function template for wireless fields */
#define WIRELESS_SHOW(name, field, format_string)			\
static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
{									\
	return sprintf(buf, format_string, iw->field);			\
}									\
static ssize_t show_iw_##name(struct device *d,				\
			      struct device_attribute *attr, char *buf)	\
{									\
	return wireless_show(d, buf, format_iw_##name);			\
}									\
static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
WIRELESS_SHOW(status, status, fmt_hex);
WIRELESS_SHOW(link, qual.qual, fmt_dec);
WIRELESS_SHOW(level, qual.level, fmt_dec);
WIRELESS_SHOW(noise, qual.noise, fmt_dec);
WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
WIRELESS_SHOW(crypt, discard.code, fmt_dec);
WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
WIRELESS_SHOW(misc, discard.misc, fmt_dec);
WIRELESS_SHOW(retries, discard.retries, fmt_dec);
WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
static struct attribute *wireless_attrs[] = {
	&dev_attr_status.attr,
	&dev_attr_link.attr,
	&dev_attr_level.attr,
	&dev_attr_noise.attr,
	&dev_attr_nwid.attr,
	&dev_attr_crypt.attr,
	&dev_attr_fragment.attr,
	&dev_attr_retries.attr,
	&dev_attr_misc.attr,
	&dev_attr_beacon.attr,
	NULL
};
static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}
static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}
static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static void rps_map_release(struct rcu_head *rcu)
{
	struct rps_map *map = container_of(rcu, struct rps_map, rcu);

	kfree(map);
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (old_map)
		call_rcu(&old_map->rcu, rps_map_release);

	free_cpumask_var(mask);
	return len;
}
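/*
 * The map is written as a hex CPU bitmask; e.g. (illustrative):
 *
 *	# steer eth0 rx-0 packets to CPUs 0-3
 *	echo f > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *
 * Writing 0 removes the map again.
 */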
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned int val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%u\n", val);
}
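/*
 * The flow table is vmalloc()ed, and vfree() may sleep, so it cannot run
 * from an RCU callback (softirq context).  The release path therefore
 * bounces through a workqueue: the RCU callback only schedules free_work,
 * and the work item does the actual vfree().
 */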
static void rps_dev_flow_table_release_work(struct work_struct *work)
{
	struct rps_dev_flow_table *table = container_of(work,
	    struct rps_dev_flow_table, free_work);

	vfree(table);
}
static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);

	INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
	schedule_work(&table->free_work);
}
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned int count;
	char *endp;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	count = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EINVAL;

	if (count) {
		int i;

		if (count > 1<<30) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
		count = roundup_pow_of_two(count);
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
		if (!table)
			return -ENOMEM;

		table->mask = count - 1;
		for (i = 0; i < count; i++)
			table->flows[i].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
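/*
 * e.g. (illustrative):
 *
 *	echo 4096 > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *
 * The count is rounded up to a power of two so flow hashes can be masked
 * into the table; writing 0 drops the table.
 */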
static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);


static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	    show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);

static struct attribute *rx_queue_default_attrs[] = {
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
	NULL
};
static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_raw(queue->rps_map);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		call_rcu(&map->rcu, rps_map_release);
	}

	flow_table = rcu_dereference_raw(queue->rps_flow_table);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
};
static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
	    "rx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_RPS */
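/*
 * Grow or shrink the set of rx-<n> kobjects: called with old_num == 0 when
 * a device registers and new_num == 0 when it unregisters.  On a partial
 * failure the newly added kobjects are dropped again, rolling the count
 * back to old_num.
 */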
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num)
		kobject_put(&net->_rx[i].kobj);

	return error;
#else
	return 0;
#endif
}
#ifdef CONFIG_XPS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}
static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}
static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		if (queue == &dev->_tx[i])
			break;

	BUG_ON(i >= dev->num_tx_queues);

	return i;
}
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		free_cpumask_var(mask);
		return -EINVAL;
	}

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static void xps_map_release(struct rcu_head *rcu)
{
	struct xps_map *map = container_of(rcu, struct xps_map, rcu);

	kfree(map);
}
static void xps_dev_maps_release(struct rcu_head *rcu)
{
	struct xps_dev_maps *dev_maps =
	    container_of(rcu, struct xps_dev_maps, rcu);

	kfree(dev_maps);
}
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	cpumask_var_t mask;
	int err, i, cpu, pos, map_len, alloc_len, need_set;
	unsigned long index;
	struct xps_map *map, *new_map;
	struct xps_dev_maps *dev_maps, *new_dev_maps;
	int nonempty = 0;
	int numa_node = -2;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	new_dev_maps = kzalloc(max_t(unsigned,
	    XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
	if (!new_dev_maps) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	for_each_possible_cpu(cpu) {
		map = dev_maps ?
			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
		new_map = map;
		if (map) {
			for (pos = 0; pos < map->len; pos++)
				if (map->queues[pos] == index)
					break;
			map_len = map->len;
			alloc_len = map->alloc_len;
		} else
			pos = map_len = alloc_len = 0;

		need_set = cpu_isset(cpu, *mask) && cpu_online(cpu);
#ifdef CONFIG_NUMA
		if (need_set) {
			if (numa_node == -2)
				numa_node = cpu_to_node(cpu);
			else if (numa_node != cpu_to_node(cpu))
				numa_node = -1;
		}
#endif
		if (need_set && pos >= map_len) {
			/* Need to add queue to this CPU's map */
			if (map_len >= alloc_len) {
				alloc_len = alloc_len ?
				    2 * alloc_len : XPS_MIN_MAP_ALLOC;
				new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
						       GFP_KERNEL,
						       cpu_to_node(cpu));
				if (!new_map)
					goto error;
				new_map->alloc_len = alloc_len;
				for (i = 0; i < map_len; i++)
					new_map->queues[i] = map->queues[i];
				new_map->len = map_len;
			}
			new_map->queues[new_map->len++] = index;
		} else if (!need_set && pos < map_len) {
			/* Need to remove queue from this CPU's map */
			if (map_len > 1)
				new_map->queues[pos] =
				    new_map->queues[--new_map->len];
			else
				new_map = NULL;
		}
		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
	}

	/* Cleanup old maps */
	for_each_possible_cpu(cpu) {
		map = dev_maps ?
			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
			call_rcu(&map->rcu, xps_map_release);
		if (new_dev_maps->cpu_map[cpu])
			nonempty = 1;
	}

	if (nonempty)
		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
	else {
		kfree(new_dev_maps);
		rcu_assign_pointer(dev->xps_maps, NULL);
	}

	if (dev_maps)
		call_rcu(&dev_maps->rcu, xps_dev_maps_release);

	netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node :
					    NUMA_NO_NODE);

	mutex_unlock(&xps_map_mutex);

	free_cpumask_var(mask);
	return len;

error:
	mutex_unlock(&xps_map_mutex);

	if (new_dev_maps)
		for_each_possible_cpu(i)
			kfree(rcu_dereference_protected(
				new_dev_maps->cpu_map[i],
				1));
	kfree(new_dev_maps);
	free_cpumask_var(mask);
	return -ENOMEM;
}
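/*
 * e.g. (illustrative):
 *
 *	# allow only CPUs 0 and 1 to transmit on eth0 tx-0
 *	echo 3 > /sys/class/net/eth0/queues/tx-0/xps_cpus
 */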
static struct netdev_queue_attribute xps_cpus_attribute =
    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);

static struct attribute *netdev_queue_default_attrs[] = {
	&xps_cpus_attribute.attr,
	NULL
};
static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	unsigned long index;
	int i, pos, nonempty = 0;

	index = get_netdev_queue_index(queue);

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			map = xmap_dereference(dev_maps->cpu_map[i]);
			if (!map)
				continue;
			for (pos = 0; pos < map->len; pos++)
				if (map->queues[pos] == index)
					break;
			if (pos < map->len) {
				if (map->len > 1)
					map->queues[pos] =
					    map->queues[--map->len];
				else {
					RCU_INIT_POINTER(dev_maps->cpu_map[i],
					    NULL);
					call_rcu(&map->rcu, xps_map_release);
					map = NULL;
				}
			}
			if (map)
				nonempty = 1;
		}
		if (!nonempty) {
			RCU_INIT_POINTER(dev->xps_maps, NULL);
			call_rcu(&dev_maps->rcu, xps_dev_maps_release);
		}
	}
	mutex_unlock(&xps_map_mutex);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
};
static int netdev_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_queue *queue = net->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
	    "tx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_XPS */
int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_XPS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num)
		kobject_put(&net->_tx[i].kobj);

	return error;
#else
	return 0;
#endif
}
static int register_queue_kobjects(struct net_device *net)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
	net->queues_kset = kset_create_and_add("queues",
	    NULL, &net->dev.kobj);
	if (!net->queues_kset)
		return -ENOMEM;
#endif

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(net, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(net, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(net, txq, 0);
	net_rx_queue_update_kobjects(net, rxq, 0);
	return error;
}
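/*
 * Resulting layout with RPS and XPS enabled (illustrative):
 *
 *	/sys/class/net/eth0/queues/rx-0/rps_cpus
 *	/sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *	/sys/class/net/eth0/queues/tx-0/xps_cpus
 */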
static void remove_queue_kobjects(struct net_device *net)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	net_rx_queue_update_kobjects(net, real_rx, 0);
	netdev_queue_update_kobjects(net, real_tx, 0);
#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
	kset_unregister(net->queues_kset);
#endif
}
static const void *net_current_ns(void)
{
	return current->nsproxy->net_ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.current_ns = net_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
static void net_kobj_ns_exit(struct net *net)
{
	kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
}

static struct pernet_operations kobj_net_ops = {
	.exit = net_kobj_ns_exit,
};
#ifdef CONFIG_HOTPLUG
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively. */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}
#endif
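/*
 * Consumers such as udev then see these variables in the event
 * environment, e.g. (illustrative):
 *
 *	ACTION=add
 *	INTERFACE=eth0
 *	IFINDEX=2
 */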
/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	kfree((char *)dev - dev->padded);
}
static const void *net_namespace(struct device *d)
{
	struct net_device *dev;
	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}
static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
#ifdef CONFIG_SYSFS
	.dev_attrs = net_class_attributes,
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_HOTPLUG
	.dev_uevent = netdev_uevent,
#endif
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(net);

	device_del(dev);
}
/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);
	const struct attribute_group **groups = net->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = net;
	dev->groups = groups;

	dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;
#ifdef CONFIG_WIRELESS_EXT_SYSFS
	if (net->ieee80211_ptr)
		*groups++ = &wireless_group;
#ifdef CONFIG_WIRELESS_EXT
	else if (net->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(net);
	if (error) {
		device_del(dev);
		return error;
	}

	return error;
}
int netdev_class_create_file(struct class_attribute *class_attr)
{
	return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);

void netdev_class_remove_file(struct class_attribute *class_attr)
{
	class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);
int netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	register_pernet_subsys(&kobj_net_ops);
	return class_register(&net_class);
}