/*
 * net-sysfs.c - network device class and attributes
 *
 * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/slab.h>
#include <linux/nsproxy.h>
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/pm_runtime.h>

#include "net-sysfs.h"
#ifdef CONFIG_SYSFS
static const char fmt_hex[] = "%#x\n";
static const char fmt_long_hex[] = "%#lx\n";
static const char fmt_dec[] = "%d\n";
static const char fmt_udec[] = "%u\n";
static const char fmt_ulong[] = "%lu\n";
static const char fmt_u64[] = "%llu\n";
static inline int dev_isalive(const struct net_device *dev)
{
	return dev->reg_state <= NETREG_REGISTERED;
}
/* use same locking rules as GIF* ioctl's */
static ssize_t netdev_show(const struct device *dev,
			   struct device_attribute *attr, char *buf,
			   ssize_t (*format)(const struct net_device *, char *))
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = (*format)(net, buf);
	read_unlock(&dev_base_lock);

	return ret;
}
/* generate a show function for simple field */
#define NETDEVICE_SHOW(field, format_string)				\
static ssize_t format_##field(const struct net_device *net, char *buf)	\
{									\
	return sprintf(buf, format_string, net->field);			\
}									\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr, char *buf)	\
{									\
	return netdev_show(dev, attr, buf, format_##field);		\
}

#define NETDEVICE_SHOW_RO(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RO(field)

#define NETDEVICE_SHOW_RW(field, format_string)				\
NETDEVICE_SHOW(field, format_string);					\
static DEVICE_ATTR_RW(field)
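/*
 * For illustration only (not part of the original file): a declaration such
 * as NETDEVICE_SHOW_RO(ifindex, fmt_dec) expands roughly to
 *
 *	static ssize_t format_ifindex(const struct net_device *net, char *buf)
 *	{
 *		return sprintf(buf, "%d\n", net->ifindex);
 *	}
 *	static ssize_t ifindex_show(struct device *dev,
 *				    struct device_attribute *attr, char *buf)
 *	{
 *		return netdev_show(dev, attr, buf, format_ifindex);
 *	}
 *	static DEVICE_ATTR_RO(ifindex);
 *
 * i.e. one locked show routine per field plus the device_attribute that
 * sysfs exposes under /sys/class/net/<iface>/.
 */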
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len,
			    int (*set)(struct net_device *, unsigned long))
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	unsigned long new;
	int ret = -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 0, &new);
	if (ret)
		goto err;

	if (!rtnl_trylock())
		return restart_syscall();

	if (dev_isalive(netdev)) {
		if ((ret = (*set)(netdev, new)) == 0)
			ret = len;
	}
	rtnl_unlock();
 err:
	return ret;
}
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
NETDEVICE_SHOW_RO(ifindex, fmt_dec);
NETDEVICE_SHOW_RO(type, fmt_dec);
NETDEVICE_SHOW_RO(link_mode, fmt_dec);
/* use same locking rules as GIFHWADDR ioctl's */
static ssize_t address_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct net_device *net = to_net_dev(dev);
	ssize_t ret = -EINVAL;

	read_lock(&dev_base_lock);
	if (dev_isalive(net))
		ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
	read_unlock(&dev_base_lock);
	return ret;
}
static DEVICE_ATTR_RO(address);
static ssize_t broadcast_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *net = to_net_dev(dev);
	if (dev_isalive(net))
		return sysfs_format_mac(buf, net->broadcast, net->addr_len);
	return -EINVAL;
}
static DEVICE_ATTR_RO(broadcast);
static int change_carrier(struct net_device *net, unsigned long new_carrier)
{
	if (!netif_running(net))
		return -EINVAL;
	return dev_change_carrier(net, (bool) new_carrier);
}
static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_carrier);
}
static ssize_t carrier_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	if (netif_running(netdev)) {
		return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
	}
	return -EINVAL;
}
static DEVICE_ATTR_RW(carrier);
static ssize_t speed_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd))
			ret = sprintf(buf, fmt_udec, ethtool_cmd_speed(&cmd));
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(speed);
static ssize_t duplex_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	int ret = -EINVAL;

	if (!rtnl_trylock())
		return restart_syscall();

	if (netif_running(netdev)) {
		struct ethtool_cmd cmd;
		if (!__ethtool_get_settings(netdev, &cmd)) {
			const char *duplex;
			switch (cmd.duplex) {
			case DUPLEX_HALF:
				duplex = "half";
				break;
			case DUPLEX_FULL:
				duplex = "full";
				break;
			default:
				duplex = "unknown";
				break;
			}
			ret = sprintf(buf, "%s\n", duplex);
		}
	}
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RO(duplex);
static ssize_t dormant_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);

	if (netif_running(netdev))
		return sprintf(buf, fmt_dec, !!netif_dormant(netdev));

	return -EINVAL;
}
static DEVICE_ATTR_RO(dormant);
static const char *const operstates[] = {
	"unknown",
	"notpresent", /* currently unused */
	"down",
	"lowerlayerdown",
	"testing", /* currently unused */
	"dormant",
	"up"
};
static ssize_t operstate_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	unsigned char operstate;

	read_lock(&dev_base_lock);
	operstate = netdev->operstate;
	if (!netif_running(netdev))
		operstate = IF_OPER_DOWN;
	read_unlock(&dev_base_lock);

	if (operstate >= ARRAY_SIZE(operstates))
		return -EINVAL; /* should not happen */

	return sprintf(buf, "%s\n", operstates[operstate]);
}
static DEVICE_ATTR_RO(operstate);
/* read-write attributes */

static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
	return dev_set_mtu(net, (int) new_mtu);
}

static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_mtu);
}
NETDEVICE_SHOW_RW(mtu, fmt_dec);
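/*
 * Illustrative note (values and path are examples, not taken from this file):
 * a write of "1500" to /sys/class/net/<iface>/mtu goes through netdev_store(),
 * which checks CAP_NET_ADMIN in the device's namespace, parses the string with
 * kstrtoul() and, under the rtnl lock, hands the value to change_mtu() and
 * thus dev_set_mtu().
 */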
static int change_flags(struct net_device *net, unsigned long new_flags)
{
	return dev_change_flags(net, (unsigned int) new_flags);
}

static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_flags);
}
NETDEVICE_SHOW_RW(flags, fmt_hex);
static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
	net->tx_queue_len = new_len;
	return 0;
}

static ssize_t tx_queue_len_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t len)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct net *net = dev_net(netdev);
	size_t count = len;
	ssize_t ret;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	/* ignore trailing newline */
	if (len > 0 && buf[len - 1] == '\n')
		--count;

	if (!rtnl_trylock())
		return restart_syscall();
	ret = dev_set_alias(netdev, buf, count);
	rtnl_unlock();

	return ret < 0 ? ret : len;
}

static ssize_t ifalias_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	const struct net_device *netdev = to_net_dev(dev);
	ssize_t ret = 0;

	if (!rtnl_trylock())
		return restart_syscall();
	if (netdev->ifalias)
		ret = sprintf(buf, "%s\n", netdev->ifalias);
	rtnl_unlock();
	return ret;
}
static DEVICE_ATTR_RW(ifalias);
static int change_group(struct net_device *net, unsigned long new_group)
{
	dev_set_group(net, (int) new_group);
	return 0;
}

static ssize_t group_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t len)
{
	return netdev_store(dev, attr, buf, len, change_group);
}
NETDEVICE_SHOW(group, fmt_dec);
static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
static struct attribute *net_class_attrs[] = {
	&dev_attr_netdev_group.attr,
	&dev_attr_type.attr,
	&dev_attr_dev_id.attr,
	&dev_attr_iflink.attr,
	&dev_attr_ifindex.attr,
	&dev_attr_addr_assign_type.attr,
	&dev_attr_addr_len.attr,
	&dev_attr_link_mode.attr,
	&dev_attr_address.attr,
	&dev_attr_broadcast.attr,
	&dev_attr_speed.attr,
	&dev_attr_duplex.attr,
	&dev_attr_dormant.attr,
	&dev_attr_operstate.attr,
	&dev_attr_ifalias.attr,
	&dev_attr_carrier.attr,
	&dev_attr_mtu.attr,
	&dev_attr_flags.attr,
	&dev_attr_tx_queue_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(net_class);
/* Show a given attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
			    struct device_attribute *attr, char *buf,
			    unsigned long offset)
{
	struct net_device *dev = to_net_dev(d);
	ssize_t ret = -EINVAL;

	WARN_ON(offset > sizeof(struct rtnl_link_stats64) ||
		offset % sizeof(u64) != 0);

	read_lock(&dev_base_lock);
	if (dev_isalive(dev)) {
		struct rtnl_link_stats64 temp;
		const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

		ret = sprintf(buf, fmt_u64, *(u64 *)(((u8 *) stats) + offset));
	}
	read_unlock(&dev_base_lock);
	return ret;
}
/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name)						\
static ssize_t name##_show(struct device *d,				\
			   struct device_attribute *attr, char *buf)	\
{									\
	return netstat_show(d, attr, buf,				\
			    offsetof(struct rtnl_link_stats64, name));	\
}									\
static DEVICE_ATTR_RO(name)
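/*
 * For illustration only (not part of the original file):
 * NETSTAT_ENTRY(rx_packets) expands roughly to
 *
 *	static ssize_t rx_packets_show(struct device *d,
 *				       struct device_attribute *attr, char *buf)
 *	{
 *		return netstat_show(d, attr, buf,
 *				    offsetof(struct rtnl_link_stats64, rx_packets));
 *	}
 *	static DEVICE_ATTR_RO(rx_packets);
 *
 * so each counter is read as a u64 at its offset into the snapshot filled in
 * by dev_get_stats(), and appears as statistics/rx_packets in sysfs.
 */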
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
NETSTAT_ENTRY(rx_bytes);
NETSTAT_ENTRY(tx_bytes);
NETSTAT_ENTRY(rx_errors);
NETSTAT_ENTRY(tx_errors);
NETSTAT_ENTRY(rx_dropped);
NETSTAT_ENTRY(tx_dropped);
NETSTAT_ENTRY(multicast);
NETSTAT_ENTRY(collisions);
NETSTAT_ENTRY(rx_length_errors);
NETSTAT_ENTRY(rx_over_errors);
NETSTAT_ENTRY(rx_crc_errors);
NETSTAT_ENTRY(rx_frame_errors);
NETSTAT_ENTRY(rx_fifo_errors);
NETSTAT_ENTRY(rx_missed_errors);
NETSTAT_ENTRY(tx_aborted_errors);
NETSTAT_ENTRY(tx_carrier_errors);
NETSTAT_ENTRY(tx_fifo_errors);
NETSTAT_ENTRY(tx_heartbeat_errors);
NETSTAT_ENTRY(tx_window_errors);
NETSTAT_ENTRY(rx_compressed);
NETSTAT_ENTRY(tx_compressed);
static struct attribute *netstat_attrs[] = {
	&dev_attr_rx_packets.attr,
	&dev_attr_tx_packets.attr,
	&dev_attr_rx_bytes.attr,
	&dev_attr_tx_bytes.attr,
	&dev_attr_rx_errors.attr,
	&dev_attr_tx_errors.attr,
	&dev_attr_rx_dropped.attr,
	&dev_attr_tx_dropped.attr,
	&dev_attr_multicast.attr,
	&dev_attr_collisions.attr,
	&dev_attr_rx_length_errors.attr,
	&dev_attr_rx_over_errors.attr,
	&dev_attr_rx_crc_errors.attr,
	&dev_attr_rx_frame_errors.attr,
	&dev_attr_rx_fifo_errors.attr,
	&dev_attr_rx_missed_errors.attr,
	&dev_attr_tx_aborted_errors.attr,
	&dev_attr_tx_carrier_errors.attr,
	&dev_attr_tx_fifo_errors.attr,
	&dev_attr_tx_heartbeat_errors.attr,
	&dev_attr_tx_window_errors.attr,
	&dev_attr_rx_compressed.attr,
	&dev_attr_tx_compressed.attr,
	NULL
};

static struct attribute_group netstat_group = {
	.name  = "statistics",
	.attrs  = netstat_attrs,
};
#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
static struct attribute *wireless_attrs[] = {
	NULL
};

static struct attribute_group wireless_group = {
	.name = "wireless",
	.attrs = wireless_attrs,
};
#endif

#else /* CONFIG_SYSFS */
#define net_class_groups	NULL
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_RPS
/*
 * RX queue sysfs structures and functions.
 */
struct rx_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_rx_queue *queue,
	    struct rx_queue_attribute *attr, const char *buf, size_t len);
};
#define to_rx_queue_attr(_attr) container_of(_attr,		\
    struct rx_queue_attribute, attr)

#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
				  char *buf)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
				   const char *buf, size_t count)
{
	struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
	struct netdev_rx_queue *queue = to_rx_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops rx_queue_sysfs_ops = {
	.show = rx_queue_attr_show,
	.store = rx_queue_attr_store,
};
static ssize_t show_rps_map(struct netdev_rx_queue *queue,
			    struct rx_queue_attribute *attribute, char *buf)
{
	struct rps_map *map;
	cpumask_var_t mask;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	if (map)
		for (i = 0; i < map->len; i++)
			cpumask_set_cpu(map->cpus[i], mask);

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		rcu_read_unlock();
		free_cpumask_var(mask);
		return -EINVAL;
	}
	rcu_read_unlock();

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     struct rx_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_SPINLOCK(rps_map_lock);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
	    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
	    GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i)
		map->len = i;
	else {
		kfree(map);
		map = NULL;
	}

	spin_lock(&rps_map_lock);
	old_map = rcu_dereference_protected(queue->rps_map,
					    lockdep_is_held(&rps_map_lock));
	rcu_assign_pointer(queue->rps_map, map);
	spin_unlock(&rps_map_lock);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map) {
		kfree_rcu(old_map, rcu);
		static_key_slow_dec(&rps_needed);
	}
	free_cpumask_var(mask);
	return len;
}
static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					   struct rx_queue_attribute *attr,
					   char *buf)
{
	struct rps_dev_flow_table *flow_table;
	unsigned long val = 0;

	rcu_read_lock();
	flow_table = rcu_dereference(queue->rps_flow_table);
	if (flow_table)
		val = (unsigned long)flow_table->mask + 1;
	rcu_read_unlock();

	return sprintf(buf, "%lu\n", val);
}
static void rps_dev_flow_table_release(struct rcu_head *rcu)
{
	struct rps_dev_flow_table *table = container_of(rcu,
	    struct rps_dev_flow_table, rcu);
	vfree(table);
}
static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
					    struct rx_queue_attribute *attr,
					    const char *buf, size_t len)
{
	unsigned long mask, count;
	struct rps_dev_flow_table *table, *old_table;
	static DEFINE_SPINLOCK(rps_dev_flow_lock);
	int rc;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	rc = kstrtoul(buf, 0, &count);
	if (rc < 0)
		return rc;

	if (count) {
		mask = count - 1;
		/* mask = roundup_pow_of_two(count) - 1;
		 * without overflows...
		 */
		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);
		/* On 64 bit arches, must check mask fits in table->mask (u32),
		 * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
		 * doesn't overflow.
		 */
#if BITS_PER_LONG > 32
		if (mask > (unsigned long)(u32)mask)
			return -EINVAL;
#else
		if (mask > (ULONG_MAX - RPS_DEV_FLOW_TABLE_SIZE(1))
				/ sizeof(struct rps_dev_flow)) {
			/* Enforce a limit to prevent overflow */
			return -EINVAL;
		}
#endif
		table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(mask + 1));
		if (!table)
			return -ENOMEM;

		table->mask = mask;
		for (count = 0; count <= mask; count++)
			table->flows[count].cpu = RPS_NO_CPU;
	} else
		table = NULL;

	spin_lock(&rps_dev_flow_lock);
	old_table = rcu_dereference_protected(queue->rps_flow_table,
					      lockdep_is_held(&rps_dev_flow_lock));
	rcu_assign_pointer(queue->rps_flow_table, table);
	spin_unlock(&rps_dev_flow_lock);

	if (old_table)
		call_rcu(&old_table->rcu, rps_dev_flow_table_release);

	return len;
}
static struct rx_queue_attribute rps_cpus_attribute =
	__ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);

static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
	__ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
	    show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);

static struct attribute *rx_queue_default_attrs[] = {
	&rps_cpus_attribute.attr,
	&rps_dev_flow_table_cnt_attribute.attr,
	NULL
};
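/*
 * These attributes appear once per RX queue, e.g.
 * /sys/class/net/<iface>/queues/rx-0/rps_cpus and .../rps_flow_cnt
 * (the paths and values are illustrative, not taken from this file).
 * Writing a hexadecimal CPU bitmap such as "e" to rps_cpus selects CPUs 1-3
 * for RPS on that queue; writing "0" clears the map.
 */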
static void rx_queue_release(struct kobject *kobj)
{
	struct netdev_rx_queue *queue = to_rx_queue(kobj);
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;

	map = rcu_dereference_protected(queue->rps_map, 1);
	if (map) {
		RCU_INIT_POINTER(queue->rps_map, NULL);
		kfree_rcu(map, rcu);
	}

	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
	if (flow_table) {
		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
	}

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static struct kobj_type rx_queue_ktype = {
	.sysfs_ops = &rx_queue_sysfs_ops,
	.release = rx_queue_release,
	.default_attrs = rx_queue_default_attrs,
};
static int rx_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_rx_queue *queue = net->_rx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
	    "rx-%u", index);
	if (error) {
		kobject_put(kobj);
		return error;
	}

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return error;
}
#endif /* CONFIG_RPS */
int
net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_RPS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = rx_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num)
		kobject_put(&net->_rx[i].kobj);

	return error;
#else
	return 0;
#endif
}
#ifdef CONFIG_SYSFS
/*
 * netdev_queue sysfs structures and functions.
 */
struct netdev_queue_attribute {
	struct attribute attr;
	ssize_t (*show)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, char *buf);
	ssize_t (*store)(struct netdev_queue *queue,
	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
};
#define to_netdev_queue_attr(_attr) container_of(_attr,		\
    struct netdev_queue_attribute, attr)

#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
static ssize_t netdev_queue_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(queue, attribute, buf);
}

static ssize_t netdev_queue_attr_store(struct kobject *kobj,
				       struct attribute *attr,
				       const char *buf, size_t count)
{
	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
	struct netdev_queue *queue = to_netdev_queue(kobj);

	if (!attribute->store)
		return -EIO;

	return attribute->store(queue, attribute, buf, count);
}

static const struct sysfs_ops netdev_queue_sysfs_ops = {
	.show = netdev_queue_attr_show,
	.store = netdev_queue_attr_store,
};
static ssize_t show_trans_timeout(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attribute,
				  char *buf)
{
	unsigned long trans_timeout;

	spin_lock_irq(&queue->_xmit_lock);
	trans_timeout = queue->trans_timeout;
	spin_unlock_irq(&queue->_xmit_lock);

	return sprintf(buf, "%lu", trans_timeout);
}

static struct netdev_queue_attribute queue_trans_timeout =
	__ATTR(tx_timeout, S_IRUGO, show_trans_timeout, NULL);
#ifdef CONFIG_BQL
/*
 * Byte queue limits sysfs structures and functions.
 */
static ssize_t bql_show(char *buf, unsigned int value)
{
	return sprintf(buf, "%u\n", value);
}

static ssize_t bql_set(const char *buf, const size_t count,
		       unsigned int *pvalue)
{
	unsigned int value;
	int err;

	if (!strcmp(buf, "max") || !strcmp(buf, "max\n"))
		value = DQL_MAX_LIMIT;
	else {
		err = kstrtouint(buf, 10, &value);
		if (err < 0)
			return err;
		if (value > DQL_MAX_LIMIT)
			return -EINVAL;
	}

	*pvalue = value;

	return count;
}
static ssize_t bql_show_hold_time(struct netdev_queue *queue,
				  struct netdev_queue_attribute *attr,
				  char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", jiffies_to_msecs(dql->slack_hold_time));
}

static ssize_t bql_set_hold_time(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attribute,
				 const char *buf, size_t len)
{
	struct dql *dql = &queue->dql;
	unsigned int value;
	int err;

	err = kstrtouint(buf, 10, &value);
	if (err < 0)
		return err;

	dql->slack_hold_time = msecs_to_jiffies(value);

	return len;
}

static struct netdev_queue_attribute bql_hold_time_attribute =
	__ATTR(hold_time, S_IRUGO | S_IWUSR, bql_show_hold_time,
	    bql_set_hold_time);
static ssize_t bql_show_inflight(struct netdev_queue *queue,
				 struct netdev_queue_attribute *attr,
				 char *buf)
{
	struct dql *dql = &queue->dql;

	return sprintf(buf, "%u\n", dql->num_queued - dql->num_completed);
}

static struct netdev_queue_attribute bql_inflight_attribute =
	__ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
#define BQL_ATTR(NAME, FIELD)						\
static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,		\
				 struct netdev_queue_attribute *attr,	\
				 char *buf)				\
{									\
	return bql_show(buf, queue->dql.FIELD);				\
}									\
									\
static ssize_t bql_set_ ## NAME(struct netdev_queue *queue,		\
				struct netdev_queue_attribute *attr,	\
				const char *buf, size_t len)		\
{									\
	return bql_set(buf, len, &queue->dql.FIELD);			\
}									\
									\
static struct netdev_queue_attribute bql_ ## NAME ## _attribute =	\
	__ATTR(NAME, S_IRUGO | S_IWUSR, bql_show_ ## NAME,		\
	    bql_set_ ## NAME)

BQL_ATTR(limit, limit);
BQL_ATTR(limit_max, max_limit);
BQL_ATTR(limit_min, min_limit);
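/*
 * For illustration only (not part of the original file): BQL_ATTR(limit, limit)
 * expands roughly to a bql_show_limit()/bql_set_limit() pair operating on
 * queue->dql.limit, plus a bql_limit_attribute declared with
 * __ATTR(limit, S_IRUGO | S_IWUSR, bql_show_limit, bql_set_limit), so each
 * dql field becomes one file in the byte_queue_limits group.
 */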
static struct attribute *dql_attrs[] = {
	&bql_limit_attribute.attr,
	&bql_limit_max_attribute.attr,
	&bql_limit_min_attribute.attr,
	&bql_hold_time_attribute.attr,
	&bql_inflight_attribute.attr,
	NULL
};

static struct attribute_group dql_group = {
	.name  = "byte_queue_limits",
	.attrs  = dql_attrs,
};
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
	struct net_device *dev = queue->dev;
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		if (queue == &dev->_tx[i])
			break;

	BUG_ON(i >= dev->num_tx_queues);

	return i;
}
static ssize_t show_xps_map(struct netdev_queue *queue,
			    struct netdev_queue_attribute *attribute, char *buf)
{
	struct net_device *dev = queue->dev;
	struct xps_dev_maps *dev_maps;
	cpumask_var_t mask;
	unsigned long index;
	size_t len = 0;
	int i;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		for_each_possible_cpu(i) {
			struct xps_map *map =
			    rcu_dereference(dev_maps->cpu_map[i]);
			if (map) {
				int j;
				for (j = 0; j < map->len; j++) {
					if (map->queues[j] == index) {
						cpumask_set_cpu(i, mask);
						break;
					}
				}
			}
		}
	}
	rcu_read_unlock();

	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
	if (PAGE_SIZE - len < 3) {
		free_cpumask_var(mask);
		return -EINVAL;
	}

	free_cpumask_var(mask);
	len += sprintf(buf + len, "\n");
	return len;
}
static ssize_t store_xps_map(struct netdev_queue *queue,
			     struct netdev_queue_attribute *attribute,
			     const char *buf, size_t len)
{
	struct net_device *dev = queue->dev;
	unsigned long index;
	cpumask_var_t mask;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	index = get_netdev_queue_index(queue);

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	err = netif_set_xps_queue(dev, mask, index);

	free_cpumask_var(mask);

	return err ? : len;
}

static struct netdev_queue_attribute xps_cpus_attribute =
	__ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
#endif /* CONFIG_XPS */
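/*
 * Like rps_cpus on the receive side, xps_cpus is exposed per TX queue, e.g.
 * /sys/class/net/<iface>/queues/tx-0/xps_cpus (path shown for illustration).
 * The parsed CPU mask is handed to netif_set_xps_queue(), which rebuilds the
 * device's xps_maps so that those CPUs prefer this queue for transmit.
 */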
static struct attribute *netdev_queue_default_attrs[] = {
	&queue_trans_timeout.attr,
#ifdef CONFIG_XPS
	&xps_cpus_attribute.attr,
#endif
	NULL
};
static void netdev_queue_release(struct kobject *kobj)
{
	struct netdev_queue *queue = to_netdev_queue(kobj);

	memset(kobj, 0, sizeof(*kobj));
	dev_put(queue->dev);
}
static struct kobj_type netdev_queue_ktype = {
	.sysfs_ops = &netdev_queue_sysfs_ops,
	.release = netdev_queue_release,
	.default_attrs = netdev_queue_default_attrs,
};
static int netdev_queue_add_kobject(struct net_device *net, int index)
{
	struct netdev_queue *queue = net->_tx + index;
	struct kobject *kobj = &queue->kobj;
	int error = 0;

	kobj->kset = net->queues_kset;
	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
	    "tx-%u", index);
	if (error)
		goto exit;

#ifdef CONFIG_BQL
	error = sysfs_create_group(kobj, &dql_group);
	if (error)
		goto exit;
#endif

	kobject_uevent(kobj, KOBJ_ADD);
	dev_hold(queue->dev);

	return 0;

exit:
	kobject_put(kobj);
	return error;
}
#endif /* CONFIG_SYSFS */
int
netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
{
#ifdef CONFIG_SYSFS
	int i;
	int error = 0;

	for (i = old_num; i < new_num; i++) {
		error = netdev_queue_add_kobject(net, i);
		if (error) {
			new_num = old_num;
			break;
		}
	}

	while (--i >= new_num) {
		struct netdev_queue *queue = net->_tx + i;

#ifdef CONFIG_BQL
		sysfs_remove_group(&queue->kobj, &dql_group);
#endif
		kobject_put(&queue->kobj);
	}

	return error;
#else
	return 0;
#endif /* CONFIG_SYSFS */
}
static int register_queue_kobjects(struct net_device *net)
{
	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;

#ifdef CONFIG_SYSFS
	net->queues_kset = kset_create_and_add("queues",
	    NULL, &net->dev.kobj);
	if (!net->queues_kset)
		return -ENOMEM;
#endif

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	error = net_rx_queue_update_kobjects(net, 0, real_rx);
	if (error)
		goto error;
	rxq = real_rx;

	error = netdev_queue_update_kobjects(net, 0, real_tx);
	if (error)
		goto error;
	txq = real_tx;

	return 0;

error:
	netdev_queue_update_kobjects(net, txq, 0);
	net_rx_queue_update_kobjects(net, rxq, 0);
	return error;
}
static void remove_queue_kobjects(struct net_device *net)
{
	int real_rx = 0, real_tx = 0;

#ifdef CONFIG_RPS
	real_rx = net->real_num_rx_queues;
#endif
	real_tx = net->real_num_tx_queues;

	net_rx_queue_update_kobjects(net, real_rx, 0);
	netdev_queue_update_kobjects(net, real_tx, 0);
#ifdef CONFIG_SYSFS
	kset_unregister(net->queues_kset);
#endif
}
static void *net_grab_current_ns(void)
{
	struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
	if (ns)
		atomic_inc(&ns->passive);
#endif
	return ns;
}

static const void *net_initial_ns(void)
{
	return &init_net;
}

static const void *net_netlink_ns(struct sock *sk)
{
	return sock_net(sk);
}

struct kobj_ns_type_operations net_ns_type_operations = {
	.type = KOBJ_NS_TYPE_NET,
	.grab_current_ns = net_grab_current_ns,
	.netlink_ns = net_netlink_ns,
	.initial_ns = net_initial_ns,
	.drop_ns = net_drop_ns,
};
EXPORT_SYMBOL_GPL(net_ns_type_operations);
static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
{
	struct net_device *dev = to_net_dev(d);
	int retval;

	/* pass interface to uevent. */
	retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
	if (retval)
		goto exit;

	/* pass ifindex to uevent.
	 * ifindex is useful as it won't change (interface name may change)
	 * and is what RtNetlink uses natively.
	 */
	retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);

exit:
	return retval;
}
/*
 *	netdev_release -- destroy and free a dead device.
 *	Called when last reference to device kobject is gone.
 */
static void netdev_release(struct device *d)
{
	struct net_device *dev = to_net_dev(d);

	BUG_ON(dev->reg_state != NETREG_RELEASED);

	kfree(dev->ifalias);
	kfree((char *)dev - dev->padded);
}
static const void *net_namespace(struct device *d)
{
	struct net_device *dev;
	dev = container_of(d, struct net_device, dev);
	return dev_net(dev);
}
static struct class net_class = {
	.name = "net",
	.dev_release = netdev_release,
	.dev_groups = net_class_groups,
	.dev_uevent = netdev_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};
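/*
 * Registering a net_device against this class is what makes it appear as
 * /sys/class/net/<ifname>; the per-device attributes, the statistics group,
 * the optional wireless group and the "queues" kset all hang off that
 * device directory.
 */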
/* Delete sysfs entries but hold kobject reference until after all
 * netdev references are gone.
 */
void netdev_unregister_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);

	kobject_get(&dev->kobj);

	remove_queue_kobjects(net);

	pm_runtime_set_memalloc_noio(dev, false);

	device_del(dev);
}
/* Create sysfs entries for network device. */
int netdev_register_kobject(struct net_device *net)
{
	struct device *dev = &(net->dev);
	const struct attribute_group **groups = net->sysfs_groups;
	int error = 0;

	device_initialize(dev);
	dev->class = &net_class;
	dev->platform_data = net;
	dev->groups = groups;

	dev_set_name(dev, "%s", net->name);

#ifdef CONFIG_SYSFS
	/* Allow for a device specific group */
	if (*groups)
		groups++;

	*groups++ = &netstat_group;

#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
	if (net->ieee80211_ptr)
		*groups++ = &wireless_group;
#if IS_ENABLED(CONFIG_WIRELESS_EXT)
	else if (net->wireless_handlers)
		*groups++ = &wireless_group;
#endif
#endif
#endif /* CONFIG_SYSFS */

	error = device_add(dev);
	if (error)
		return error;

	error = register_queue_kobjects(net);
	if (error) {
		device_del(dev);
		return error;
	}

	pm_runtime_set_memalloc_noio(dev, true);

	return error;
}
int netdev_class_create_file(struct class_attribute *class_attr)
{
	return class_create_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_create_file);

void netdev_class_remove_file(struct class_attribute *class_attr)
{
	class_remove_file(&net_class, class_attr);
}
EXPORT_SYMBOL(netdev_class_remove_file);

int netdev_kobject_init(void)
{
	kobj_ns_type_register(&net_ns_type_operations);
	return class_register(&net_class);
}