/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/wext.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/net_tstamp.h>
#include <linux/static_key.h>
#include <net/flow_keys.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

/*
 *	The list of packet types we will receive (as opposed to discard)
 *	and the routines to invoke.
 *
 *	Why 16. Because with 16 the only overlap we get on a hash of the
 *	low nibble of the protocol value is RARP/SNAP/X.25.
 *
 *	NOTE:  That is no longer true with the addition of VLAN tags.  Not
 *		sure which should go first, but I bet it won't make much
 *		difference if we are running VLANs.  The good news is that
 *		this protocol won't be in the list unless compiled in, so
 *		the average user (w/out VLANs) will not be adversely affected.
 */

#define PTYPE_HASH_SIZE	(16)
#define PTYPE_HASH_MASK	(PTYPE_HASH_SIZE - 1)
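
/*
 * Worked example of the overlap noted above: RARP (0x8035), X.25 (0x0805)
 * and SNAP (0x0005) all have low nibble 5, so they share bucket
 * ptype_base[0x5], while e.g. IP (0x0800) hashes alone to ptype_base[0x0].
 */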
static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
static struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}
/* Device list insertion */
static int list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);

	return 0;
}
/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}
static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/
/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	were first on the list, it could not sense that the packet
 *	is cloned and should be copied-on-write; it would
 *	change it, and subsequent readers would get a broken packet.
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);
/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&packet_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &packet_offload
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(__dev_remove_offload);
/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &packet_offload is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine for
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}
/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);
/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}
/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/
/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);
/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, p, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);
/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);
/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct hlist_node *p;
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, p, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);
/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);
/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be exactly one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}
/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}
/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
		return 0;

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0)
		return err;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		return ret;
	}

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			memcpy(dev->name, oldname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}
/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);
/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
	}
}
EXPORT_SYMBOL(netdev_state_change);
/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
/**
 *	dev_load 	- load a network module
 *	@net: the applicable net namespace
 *	@name: name of interface
 *
 *	If a network interface is not present and the process has suitable
 *	privileges this function loads the module. If module loading is not
 *	available in this kernel then it becomes a nop.
 */

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;
	int no_module;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	no_module = !dev;
	if (no_module && capable(CAP_NET_ADMIN))
		no_module = request_module("netdev-%s", name);
	if (no_module && capable(CAP_SYS_MODULE)) {
		if (!request_module("%s", name))
			pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated).  Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
				name);
	}
}
EXPORT_SYMBOL(dev_load);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}
/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, unreg_list) {
		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_clear_bit(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
	}

	return 0;
}
static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);
	return retval;
}
static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(dev, tmp, head, unreg_list)
		if (!(dev->flags & IFF_UP))
			list_move(&dev->unreg_list, &tmp_list);

	__dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	/* rollback_registered_many needs the complete original list */
	list_splice(&tmp_list, head);
	return 0;
}
/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->unreg_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);
static int dev_boot_phase = 1;
/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			nb->notifier_call(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
				nb->notifier_call(nb, NETDEV_DOWN, dev);
			}
			nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	ASSERT_RTNL();
	return raw_notifier_call_chain(&netdev_chain, val, dev);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	WARN_ON(in_interrupt());
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)		\
			__net_timestamp(SKB);			\
	}
static int net_hwtstamp_validate(struct ifreq *ifr)
{
	struct hwtstamp_config cfg;
	enum hwtstamp_tx_types tx_type;
	enum hwtstamp_rx_filters rx_filter;
	int tx_type_valid = 0;
	int rx_filter_valid = 0;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.flags) /* reserved for future extensions */
		return -EINVAL;

	tx_type = cfg.tx_type;
	rx_filter = cfg.rx_filter;

	switch (tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_ONESTEP_SYNC:
		tx_type_valid = 1;
		break;
	}

	switch (rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		rx_filter_valid = 1;
		break;
	}

	if (!tx_type_valid || !rx_filter_valid)
		return -ERANGE;

	return 0;
}
static inline bool is_skb_forwardable(struct net_device *dev,
				      struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	skb_orphan(skb);

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	skb->skb_iif = 0;
	skb->dev = dev;
	skb_dst_drop(skb);
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	return netif_rx(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}
/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb2->network_header > skb2->tail) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify that each tc mapping remains valid, and
 * if not, NULL the mapping. With no priorities mapping to an
 * offset/count pair, that pair will no longer be used. In the worst case,
 * if TC0 is invalid nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues)
			qdisc_reset_all_tx_gt(dev, txq);
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
void dev_kfree_skb_irq(struct sk_buff *skb)
{
	if (atomic_dec_and_test(&skb->users)) {
		struct softnet_data *sd;
		unsigned long flags;

		local_irq_save(flags);
		sd = &__get_cpu_var(softnet_data);
		skb->next = sd->completion_queue;
		sd->completion_queue = skb;
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(dev_kfree_skb_irq);

void dev_kfree_skb_any(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(dev_kfree_skb_any);
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
/**
 *	skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *skb_gso_segment(struct sk_buff *skb,
	netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int vlan_depth = ETH_HLEN;
	int err;

	while (type == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vh;

		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
			return ERR_PTR(-EINVAL);

		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
		type = vh->h_vlan_encapsulated_proto;
		vlan_depth += VLAN_HLEN;
	}

	skb_reset_mac_header(skb);
	skb->mac_len = skb->network_header - skb->mac_header;
	__skb_pull(skb, skb->mac_len);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif
/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}
)(struct sk_buff
*skb
);
2173 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2175 static void dev_gso_skb_destructor(struct sk_buff
*skb
)
2177 struct dev_gso_cb
*cb
;
2180 struct sk_buff
*nskb
= skb
->next
;
2182 skb
->next
= nskb
->next
;
2185 } while (skb
->next
);
2187 cb
= DEV_GSO_CB(skb
);
2189 cb
->destructor(skb
);
/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *	@features: device features as applicable to this skb
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}
static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
{
	return ((features & NETIF_F_GEN_CSUM) ||
		((features & NETIF_F_V4_CSUM) &&
		 protocol == htons(ETH_P_IP)) ||
		((features & NETIF_F_V6_CSUM) &&
		 protocol == htons(ETH_P_IPV6)) ||
		((features & NETIF_F_FCOE_CRC) &&
		 protocol == htons(ETH_P_FCOE)));
}
static netdev_features_t harmonize_features(struct sk_buff *skb,
	__be16 protocol, netdev_features_t features)
{
	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, protocol)) {
		features &= ~NETIF_F_ALL_CSUM;
		features &= ~NETIF_F_SG;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, protocol, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);

	if (protocol != htons(ETH_P_8021Q)) {
		return harmonize_features(skb, protocol, features);
	} else {
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
		return harmonize_features(skb, protocol, features);
	}
}
EXPORT_SYMBOL(netif_skb_features);
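
/*
 * Illustrative sketch (not part of this file): netif_skb_features() starts
 * from dev->features, so what a driver advertises at probe time bounds the
 * per-skb feature set computed above.  A hypothetical driver might do:
 *
 *	static void mydrv_setup(struct net_device *dev)
 *	{
 *		dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
 *		dev->vlan_features = dev->features;
 *	}
 *
 * Frames that fail the checks above simply lose the corresponding bits
 * (e.g. NETIF_F_SG is cleared when illegal_highdma() trips), and the
 * transmit path falls back to linearizing or software checksumming.
 */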
/*
 * Returns true if either:
 *	1. skb has frag_list and the device doesn't support FRAGLIST, or
 *	2. skb is fragmented and the device does not support SG.
 */
static inline int skb_needs_linearize(struct sk_buff *skb,
				      netdev_features_t features)
{
	return skb_is_nonlinear(skb) &&
			((skb_has_frag_list(skb) &&
				!(features & NETIF_F_FRAGLIST)) ||
			(skb_shinfo(skb)->nr_frags &&
				!(features & NETIF_F_SG)));
}
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;
	unsigned int skb_len;

	if (likely(!skb->next)) {
		netdev_features_t features;

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache.
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		features = netif_skb_features(skb);

		if (vlan_tx_tag_present(skb) &&
		    !(features & NETIF_F_HW_VLAN_TX)) {
			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				skb_set_transport_header(skb,
					skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				     skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		skb_len = skb->len;
		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc, dev, skb_len);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		/*
		 * If device doesn't need nskb->dst, release it right now while
		 * it's hot in this cpu cache.
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(nskb);

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(nskb, dev);

		skb_len = nskb->len;
		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL))
		skb->destructor = DEV_GSO_CB(skb)->destructor;
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}
static u32 hashrnd __read_mostly;

/*
 * Returns a Tx hash based on the given packet descriptor and the number
 * of Tx queues to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = jhash_1word(hash, hashrnd);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
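
/*
 * Illustrative sketch (not part of this file): the final line above maps a
 * 32-bit hash uniformly onto qcount queues without a modulo, by keeping the
 * upper 32 bits of the 64-bit product.  A hypothetical standalone check:
 *
 *	static u16 scale_hash(u32 hash, u16 qcount, u16 qoffset)
 *	{
 *		return (u16)(((u64)hash * qcount) >> 32) + qoffset;
 *	}
 *
 * For qcount = 8, hash = 0x20000000 lands in queue 1 and hash = 0xffffffff
 * lands in queue 7; each queue receives an equal 1/8 slice of the hash
 * space, and no expensive division is needed.
 */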
static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
{
	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
		net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
				     dev->name, queue_index,
				     dev->real_num_tx_queues);
		return 0;
	}
	return queue_index;
}

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;

				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->rxhash;
				hash = jhash_1word(hash, hashrnd);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb)
{
	int queue_index;
	const struct net_device_ops *ops = dev->netdev_ops;

	if (dev->real_num_tx_queues == 1)
		queue_index = 0;
	else if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb);
		queue_index = dev_cap_txqueue(dev, queue_index);
	} else {
		struct sock *sk = skb->sk;
		queue_index = sk_tx_queue_get(sk);

		if (queue_index < 0 || skb->ooo_okay ||
		    queue_index >= dev->real_num_tx_queues) {
			int old_index = queue_index;

			queue_index = get_xps_queue(dev, skb);
			if (queue_index < 0)
				queue_index = skb_tx_hash(dev, skb);

			if (queue_index != old_index && sk) {
				struct dst_entry *dst =
				    rcu_dereference_check(sk->sk_dst_cache, 1);

				if (dst && skb_dst(skb) == dst)
					sk_tx_queue_set(sk, queue_index);
			}
		}
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
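
/*
 * Illustrative sketch (not part of this file): a driver can override the
 * default selection above by supplying ndo_select_queue.  The mydrv_*
 * names are hypothetical; note the result is still clamped through
 * dev_cap_txqueue() by the caller.
 *
 *	static u16 mydrv_select_queue(struct net_device *dev,
 *				      struct sk_buff *skb)
 *	{
 *		if (skb->priority == TC_PRIO_CONTROL)
 *			return dev->real_num_tx_queues - 1;
 *		return skb_tx_hash(dev, skb);
 *	}
 *
 *	static const struct net_device_ops mydrv_netdev_ops = {
 *		.ndo_select_queue	= mydrv_select_queue,
 *		...
 *	};
 */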
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 *	dev_loopback_xmit - loop back @skb
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
/**
 *	dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
int dev_queue_xmit(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	txq = netdev_pick_tx(dev, skb);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (e.g. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and take the lock; it is not prone to deadlocks.
	   (Shooting the noqueue qdisc instead would be even simpler 8).)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately.
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}
EXPORT_SYMBOL(dev_queue_xmit);
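
/*
 * Illustrative sketch (not part of this file): a minimal in-kernel sender,
 * assuming the caller already holds a reference on dev and runs with IRQs
 * enabled, as the comment above requires.  dest_mac, payload and
 * payload_len are hypothetical; ETH_P_802_EX1 is the local-experimental
 * ethertype.
 *
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_802_EX1);
 *	if (dev_hard_header(skb, dev, ETH_P_802_EX1, dest_mac,
 *			    dev->dev_addr, skb->len) < 0) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 *	return dev_queue_xmit(skb);
 *
 * Note that the skb is consumed whether the call succeeds or fails.
 */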
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
/*
 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets rxhash in skb to a non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_rxhash in skb
 * if the hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_rxhash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_rxhash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = jhash_3words((__force u32)keys.dst,
			    (__force u32)keys.src,
			    (__force u32)keys.ports, hashrnd);
	if (!hash)
		hash = 1;

	skb->rxhash = hash;
}
EXPORT_SYMBOL(__skb_get_rxhash);
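
/*
 * Illustrative sketch (not part of this file): the swap above canonicalises
 * the flow so both directions hash identically.  A hypothetical standalone
 * version of the ordering rule:
 *
 *	static void flow_canonicalise(u32 *saddr, u32 *daddr,
 *				      u16 *sport, u16 *dport)
 *	{
 *		if (*daddr < *saddr ||
 *		    (*daddr == *saddr && *dport < *sport)) {
 *			swap(*saddr, *daddr);
 *			swap(*sport, *dport);
 *		}
 *	}
 *
 * After this, (10.0.0.1:5000 -> 10.0.0.2:80) and (10.0.0.2:80 ->
 * 10.0.0.1:5000) produce the same jhash_3words() input and thus the same
 * rxhash, which is what RPS/RFS rely on for flow affinity.
 */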
#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb->rxhash & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	if (!skb_get_rxhash(skb))
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[skb->rxhash &
		    sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
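
/*
 * Illustrative sketch (not part of this file): the expected caller pattern
 * for rps_may_expire_flow(), e.g. from a driver's periodic service task.
 * The mydrv_* names and the filter bookkeeping are hypothetical.
 *
 *	static void mydrv_expire_filters(struct mydrv_priv *priv, u16 rxq)
 *	{
 *		int i;
 *
 *		for (i = 0; i < priv->n_filters; i++) {
 *			struct mydrv_filter *f = &priv->filters[i];
 *
 *			if (!f->installed)
 *				continue;
 *			if (rps_may_expire_flow(priv->netdev, rxq,
 *						f->flow_id, i))
 *				mydrv_remove_hw_filter(priv, f);
 *		}
 *	}
 */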
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure belongs to another cpu.
 * If yes, queue it to our IPI list and return 1.
 * If no, return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device.
		 * We can use a non atomic operation since we own the queue lock.
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 */
int netif_rx(struct sk_buff *skb)
{
	int ret;

	/* if netpoll wants it, pretend we never saw it */
	if (netpoll_rx(skb))
		return NET_RX_DROP;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
EXPORT_SYMBOL(netif_rx);
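
/*
 * Illustrative sketch (not part of this file): the classic non-NAPI receive
 * path that netif_rx() serves.  The mydrv_* names are hypothetical; a real
 * driver would also handle DMA and error status.
 *
 *	static irqreturn_t mydrv_interrupt(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = mydrv_fetch_frame(dev);
 *
 *		if (!skb)
 *			return IRQ_NONE;
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *		return IRQ_HANDLED;
 *	}
 *
 * netif_rx() only enqueues to a per-CPU backlog (possibly a remote CPU's,
 * via RPS); the actual protocol processing happens later in NET_RX softirq
 * context through process_backlog().
 */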
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	preempt_disable();
	err = netif_rx(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_clear_bit();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_clear_bit();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for a few useless
 * instructions (a compare and two stores) when it is off but
 * CONFIG_NET_CLS_ACT is on.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif
/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
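
/*
 * Illustrative sketch (not part of this file): how a bridge-like entity
 * might attach itself to a port device.  The mybr_* names are hypothetical;
 * the RTNL requirement and the rx_handler_result contract are real.
 *
 *	static rx_handler_result_t mybr_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct mybr_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		mybr_forward(port, *pskb);
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(port_dev, mybr_handle_frame, port);
 *	rtnl_unlock();
 */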
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_ARP):
	case __constant_htons(ETH_P_IP):
	case __constant_htons(ETH_P_IPV6):
	case __constant_htons(ETH_P_8021Q):
		return true;
	default:
		return false;
	}
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;
	unsigned long pflags = current->flags;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	/*
	 * PFMEMALLOC skbs are special, they should
	 * - be delivered to SOCK_MEMALLOC sockets only
	 * - stay away from userspace
	 * - have bounded memory usage
	 *
	 * Use PF_MEMALLOC as this saves us from propagating the allocation
	 * context down to all allocation sites.
	 */
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		current->flags |= PF_MEMALLOC;

	/* if we've gotten here through NAPI, check netpoll */
	if (netpoll_receive_skb(skb))
		goto out;

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)
				&& !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (vlan_tx_nonzero_tag_present(skb))
		skb->pkt_type = PACKET_OTHERHOST;

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
out:
	tsk_restore_flags(current, pflags, PF_MEMALLOC);
	return ret;
}
/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}
EXPORT_SYMBOL(netif_receive_skb);

/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb(skb);
}

/* napi->gro_list contains packets ordered by age, with the
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	int mac_len;
	enum gro_result ret;

	if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		mac_len = skb->network_header - skb->mac_header;
		skb->mac_len = mac_len;
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
		goto normal;

	napi->gro_count++;
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	if (skb_headlen(skb) < skb_gro_offset(skb)) {
		int grow = skb_gro_offset(skb) - skb_headlen(skb);

		BUG_ON(skb->end - skb->tail < grow);

		memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

		skb->tail += grow;
		skb->data_len -= grow;

		skb_shinfo(skb)->frags[0].page_offset += grow;
		skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);

		if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
			skb_frag_unref(skb, 0);
			memmove(skb_shinfo(skb)->frags,
				skb_shinfo(skb)->frags + 1,
				--skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
		}
	}

ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
EXPORT_SYMBOL(dev_gro_receive);
static inline gro_result_t
__napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_gro_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_gro_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
		NAPI_GRO_CB(p)->flush = 0;
	}

	return dev_gro_receive(napi, skb);
}

gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_skb_finish);

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb->mac_header == skb->tail &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_gro_reset_offset(skb);

	return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
			       gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		skb->protocol = eth_type_trans(skb, skb->dev);

		if (ret == GRO_HELD)
			skb_gro_pull(skb, -ETH_HLEN);
		else if (netif_receive_skb(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
EXPORT_SYMBOL(napi_frags_finish);

static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	struct ethhdr *eth;
	unsigned int hlen;
	unsigned int off;

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*eth);
	eth = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		eth = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			skb = NULL;
			goto out;
		}
	}

	skb_gro_pull(skb, sizeof(*eth));

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.  We'll fix it up properly at the end.
	 */
	skb->protocol = eth->h_proto;

out:
	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				__smp_call_function_single(remsd->cpu,
							   &remsd->csd, 0);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending ipi; it's better to send them now,
	 * rather than waiting for net_rx_action() to end.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);

		if (qlen < quota - work) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog, so we can use a plain write instead of
			 * clear_bit() and don't need an smp_mb() barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;

			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n, false);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);

void netif_napi_del(struct napi_struct *napi)
{
	struct sk_buff *skb, *next;

	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	for (skb = napi->gro_list; skb; skb = next) {
		next = skb->next;
		skb->next = NULL;
		kfree_skb(skb);
	}

	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
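
/*
 * Illustrative sketch (not part of this file): the standard way a driver
 * wires up the NAPI primitives above.  The mydrv_* names are hypothetical.
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mydrv_priv *priv = container_of(napi,
 *						struct mydrv_priv, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = mydrv_next_rx(priv);
 *
 *			if (!skb)
 *				break;
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			napi_complete(napi);
 *			mydrv_enable_rx_irq(priv);
 *		}
 *		return work;
 *	}
 *
 * At probe time: netif_napi_add(dev, &priv->napi, mydrv_poll, 64);
 * in the RX interrupt: disable the IRQ and call napi_schedule(&priv->napi).
 */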
static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows an average
		 * latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi().  Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call.  Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight.  In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else {
				if (n->gro_list) {
					/* flush too old packets
					 * If HZ < 1000, flush all packets.
					 */
					local_irq_enable();
					napi_gro_flush(n, HZ >= 1000);
					local_irq_disable();
				}
				list_move_tail(&n->poll_list, &sd->poll_list);
			}
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}
static gifconf_func_t *gifconf_list[NPROTO];

/**
 *	register_gifconf	-	register a SIOCGIF handler
 *	@family: Address family
 *	@gifconf: Function handler
 *
 *	Register protocol dependent address dumping routines. The handler
 *	that is passed must not be freed or reused until it has been replaced
 *	by another handler.
 */
int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
	if (family >= NPROTO)
		return -EINVAL;
	gifconf_list[family] = gifconf;
	return 0;
}
EXPORT_SYMBOL(register_gifconf);

/*
 *	Map an interface index to its name (SIOCGIFNAME)
 */

/*
 *	We need this ioctl for efficient implementation of the
 *	if_indextoname() function required by the IPv6 API.  Without
 *	it, we would have to search all the interfaces to find a
 *	match.
 */

static int dev_ifname(struct net *net, struct ifreq __user *arg)
{
	struct net_device *dev;
	struct ifreq ifr;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(ifr.ifr_name, dev->name);
	rcu_read_unlock();

	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
		return -EFAULT;
	return 0;
}

/*
 *	Perform a SIOCGIFCONF call. This structure will change
 *	size eventually, and there is nothing I can do about it.
 *	Thus we will need a 'compatibility mode'.
 */

static int dev_ifconf(struct net *net, char __user *arg)
{
	struct ifconf ifc;
	struct net_device *dev;
	char __user *pos;
	int len;
	int total;
	int i;

	/*
	 *	Fetch the caller's info block.
	 */

	if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
		return -EFAULT;

	pos = ifc.ifc_buf;
	len = ifc.ifc_len;

	/*
	 *	Loop over the interfaces, and write an info block for each.
	 */

	total = 0;
	for_each_netdev(net, dev) {
		for (i = 0; i < NPROTO; i++) {
			if (gifconf_list[i]) {
				int done;
				if (!pos)
					done = gifconf_list[i](dev, NULL, 0);
				else
					done = gifconf_list[i](dev, pos + total,
							       len - total);
				if (done < 0)
					return -EFAULT;
				total += done;
			}
		}
	}

	/*
	 *	All done.  Write the updated control block back to the caller.
	 */
	ifc.ifc_len = total;

	/*
	 *	Both BSD and Solaris return 0 here, so we do too.
	 */
	return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
}
#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))

static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct net_device *dev;
	struct hlist_node *p;
	struct hlist_head *h;
	unsigned int count = 0, offset = get_offset(*pos);

	h = &net->dev_name_head[get_bucket(*pos)];
	hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
		if (++count == offset)
			return dev;
	}

	return NULL;
}

static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
{
	struct net_device *dev;
	unsigned int bucket;

	do {
		dev = dev_from_same_bucket(seq, pos);
		if (dev)
			return dev;

		bucket = get_bucket(*pos) + 1;
		*pos = set_bucket_offset(bucket, 1);
	} while (bucket < NETDEV_HASHENTRIES);

	return NULL;
}

/*
 *	This is invoked by the /proc filesystem handler to display a device
 *	in detail.
 */
void *dev_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	if (!*pos)
		return SEQ_START_TOKEN;

	if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
		return NULL;

	return dev_from_bucket(seq, pos);
}

void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return dev_from_bucket(seq, pos);
}

void dev_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}
static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
		   "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
		   dev->name, stats->rx_bytes, stats->rx_packets,
		   stats->rx_errors,
		   stats->rx_dropped + stats->rx_missed_errors,
		   stats->rx_fifo_errors,
		   stats->rx_length_errors + stats->rx_over_errors +
		    stats->rx_crc_errors + stats->rx_frame_errors,
		   stats->rx_compressed, stats->multicast,
		   stats->tx_bytes, stats->tx_packets,
		   stats->tx_errors, stats->tx_dropped,
		   stats->tx_fifo_errors, stats->collisions,
		   stats->tx_carrier_errors +
		    stats->tx_aborted_errors +
		    stats->tx_window_errors +
		    stats->tx_heartbeat_errors,
		   stats->tx_compressed);
}

/*
 *	Called from the PROCfs module. This now uses the new arbitrary sized
 *	/proc/net interface to create /proc/net/dev
 */
static int dev_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Inter-|   Receive                            "
			      "                    |  Transmit\n"
			      " face |bytes    packets errs drop fifo frame "
			      "compressed multicast|bytes    packets errs "
			      "drop fifo colls carrier compressed\n");
	else
		dev_seq_printf_stats(seq, v);
	return 0;
}
static struct softnet_data *softnet_get_online(loff_t *pos)
{
	struct softnet_data *sd = NULL;

	while (*pos < nr_cpu_ids)
		if (cpu_online(*pos)) {
			sd = &per_cpu(softnet_data, *pos);
			break;
		} else
			++*pos;
	return sd;
}

static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
{
	return softnet_get_online(pos);
}

static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return softnet_get_online(pos);
}

static void softnet_seq_stop(struct seq_file *seq, void *v)
{
}

static int softnet_seq_show(struct seq_file *seq, void *v)
{
	struct softnet_data *sd = v;

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
		   sd->processed, sd->dropped, sd->time_squeeze, 0,
		   0, 0, 0, 0, /* was fastroute */
		   sd->cpu_collision, sd->received_rps);
	return 0;
}

static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next  = dev_seq_next,
	.stop  = dev_seq_stop,
	.show  = dev_seq_show,
};

static int dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &dev_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = dev_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

static const struct seq_operations softnet_seq_ops = {
	.start = softnet_seq_start,
	.next  = softnet_seq_next,
	.stop  = softnet_seq_stop,
	.show  = softnet_seq_show,
};

static int softnet_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &softnet_seq_ops);
}

static const struct file_operations softnet_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = softnet_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static void *ptype_get_idx(loff_t pos)
{
	struct packet_type *pt = NULL;
	loff_t i = 0;
	int t;

	list_for_each_entry_rcu(pt, &ptype_all, list) {
		if (i == pos)
			return pt;
		++i;
	}

	for (t = 0; t < PTYPE_HASH_SIZE; t++) {
		list_for_each_entry_rcu(pt, &ptype_base[t], list) {
			if (i == pos)
				return pt;
			++i;
		}
	}
	return NULL;
}

static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct packet_type *pt;
	struct list_head *nxt;
	int hash;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ptype_get_idx(0);

	pt = v;
	nxt = pt->list.next;
	if (pt->type == htons(ETH_P_ALL)) {
		if (nxt != &ptype_all)
			goto found;
		hash = 0;
		nxt = ptype_base[0].next;
	} else
		hash = ntohs(pt->type) & PTYPE_HASH_MASK;

	while (nxt == &ptype_base[hash]) {
		if (++hash >= PTYPE_HASH_SIZE)
			return NULL;
		nxt = ptype_base[hash].next;
	}
found:
	return list_entry(nxt, struct packet_type, list);
}

static void ptype_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int ptype_seq_show(struct seq_file *seq, void *v)
{
	struct packet_type *pt = v;

	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Type Device      Function\n");
	else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
		if (pt->type == htons(ETH_P_ALL))
			seq_puts(seq, "ALL ");
		else
			seq_printf(seq, "%04x", ntohs(pt->type));

		seq_printf(seq, " %-8s %pF\n",
			   pt->dev ? pt->dev->name : "", pt->func);
	}

	return 0;
}

static const struct seq_operations ptype_seq_ops = {
	.start = ptype_seq_start,
	.next  = ptype_seq_next,
	.stop  = ptype_seq_stop,
	.show  = ptype_seq_show,
};

static int ptype_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ptype_seq_ops,
			sizeof(struct seq_net_private));
}

static const struct file_operations ptype_seq_fops = {
	.owner	 = THIS_MODULE,
	.open    = ptype_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
static int __net_init dev_proc_net_init(struct net *net)
{
	int rc = -ENOMEM;

	if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
		goto out;
	if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
		goto out_dev;
	if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
		goto out_softnet;

	if (wext_proc_init(net))
		goto out_ptype;
	rc = 0;
out:
	return rc;
out_ptype:
	proc_net_remove(net, "ptype");
out_softnet:
	proc_net_remove(net, "softnet_stat");
out_dev:
	proc_net_remove(net, "dev");
	goto out;
}

static void __net_exit dev_proc_net_exit(struct net *net)
{
	wext_proc_exit(net);

	proc_net_remove(net, "ptype");
	proc_net_remove(net, "softnet_stat");
	proc_net_remove(net, "dev");
}

static struct pernet_operations __net_initdata dev_proc_ops = {
	.init = dev_proc_net_init,
	.exit = dev_proc_net_exit,
};

static int __init dev_proc_init(void)
{
	return register_pernet_subsys(&dev_proc_ops);
}
#else
#define dev_proc_init() 0
#endif	/* CONFIG_PROC_FS */
/**
 *	netdev_set_master	-	set up master pointer
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success the reference counts
 *	are adjusted and the function returns zero.
 */
int netdev_set_master(struct net_device *slave, struct net_device *master)
{
	struct net_device *old = slave->master;

	ASSERT_RTNL();

	if (master) {
		if (old)
			return -EBUSY;
		dev_hold(master);
	}

	slave->master = master;

	if (old)
		dev_put(old);
	return 0;
}
EXPORT_SYMBOL(netdev_set_master);

/**
 *	netdev_set_bond_master	-	set up bonding master/slave pair
 *	@slave: slave device
 *	@master: new master device
 *
 *	Changes the master device of the slave. Pass %NULL to break the
 *	bonding. The caller must hold the RTNL semaphore. On a failure
 *	a negative errno code is returned. On success %RTM_NEWLINK is sent
 *	to the routing socket and the function returns zero.
 */
int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
{
	int err;

	ASSERT_RTNL();

	err = netdev_set_master(slave, master);
	if (err)
		return err;
	if (master)
		slave->flags |= IFF_SLAVE;
	else
		slave->flags &= ~IFF_SLAVE;

	rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
	return 0;
}
EXPORT_SYMBOL(netdev_set_bond_master);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}
static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				AUDIT_ANOM_PROMISCUOUS,
				"dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				dev->name, (dev->flags & IFF_PROMISC),
				(old_flags & IFF_PROMISC),
				from_kuid(&init_user_ns, audit_get_loginuid(current)),
				from_kuid(&init_user_ns, uid),
				from_kgid(&init_user_ns, gid),
				audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	return 0;
}

/**
 *	dev_set_promiscuity	- update promiscuity count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove promiscuity from a device. While the count in the device
 *	remains above zero the interface remains promiscuous. Once it hits zero
 *	the device reverts back to normal filtering operation. A negative inc
 *	value is used to drop promiscuity on the device.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
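
/*
 * Illustrative sketch (not part of this file): a capture-style user of the
 * counted interface above.  Because dev_set_promiscuity() adjusts a counter
 * rather than a flag, nested users compose; each +1 must be paired with a
 * -1.  Hypothetical context, typically run under RTNL:
 *
 *	err = dev_set_promiscuity(dev, 1);	on open
 *	if (err)
 *		return err;
 *	...
 *	dev_set_promiscuity(dev, -1);		on close
 */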
/**
 *	dev_set_allmulti	- update allmulti count on a device
 *	@dev: device
 *	@inc: modifier
 *
 *	Add or remove reception of all multicast frames to a device. While the
 *	count in the device remains above zero the interface remains listening
 *	to all multicast frames. Once it hits zero the device reverts back to
 *	normal filtering operation. A negative @inc value is used to drop the
 *	counter when releasing a resource needing all multicasts.
 *	Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
	}
	return 0;
}
EXPORT_SYMBOL(dev_set_allmulti);
/*
 *	Upload unicast and multicast address lists to device and
 *	configure RX filtering. When the device doesn't support unicast
 *	filtering it is put in promiscuous mode while unicast addresses
 *	are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags&IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 *	dev_get_flags - get flags reported to userspace
 *	@dev: device
 *
 *	Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 *	Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 *	Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 *	Have we downed the interface. We handle IFF_UP ourselves
	 *	according to user attempts to set it, rather than blindly
	 *	setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP) {	/* Bit is different ? */
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

		if (!ret)
			dev_set_rx_mode(dev);
	}

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;

		dev->gflags ^= IFF_PROMISC;
		dev_set_promiscuity(dev, inc);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	   is important. Some (broken) drivers set IFF_PROMISC, when
	   IFF_ALLMULTI is requested not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		dev_set_allmulti(dev, inc);
	}

	return ret;
}
void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
}
/**
 *	dev_change_flags - change device settings
 *	@dev: device
 *	@flags: device state flags
 *
 *	Change settings on device based on state flags. The flags are
 *	in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = old_flags ^ dev->flags;
	if (changes)
		rtmsg_ifinfo(RTM_NEWLINK, dev, changes);

	__dev_notify_flags(dev, old_flags);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
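
/*
 * Example (a minimal sketch): bringing an interface administratively up,
 * the way the SIOCSIFFLAGS path does it, under the rtnl lock:
 *
 *	rtnl_lock();
 *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
 *	rtnl_unlock();
 */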
/**
 *	dev_set_mtu - Change maximum transfer unit
 *	@dev: device
 *	@new_mtu: new transfer unit
 *
 *	Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = 0;
	if (ops->ndo_change_mtu)
		err = ops->ndo_change_mtu(dev, new_mtu);
	else
		dev->mtu = new_mtu;

	if (!err && dev->flags & IFF_UP)
		call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
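
/*
 * Example (a minimal sketch): switching a device to jumbo frames; whether
 * 9000 is acceptable is up to the driver's ndo_change_mtu:
 *
 *	rtnl_lock();
 *	err = dev_set_mtu(dev, 9000);
 *	rtnl_unlock();
 */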
/**
 *	dev_set_group - Change group this device belongs to
 *	@dev: device
 *	@new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);
/**
 *	dev_set_mac_address - Change Media Access Control Address
 *	@dev: device
 *	@sa: new address
 *
 *	Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (!err)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
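
/*
 * Example (a minimal sketch; new_addr is the caller's address buffer): the
 * address family must match the device type (e.g. ARPHRD_ETHER) and the
 * new address is passed in sa_data:
 *
 *	struct sockaddr sa;
 *
 *	sa.sa_family = dev->type;
 *	memcpy(sa.sa_data, new_addr, dev->addr_len);
 *	rtnl_lock();
 *	err = dev_set_mac_address(dev, &sa);
 *	rtnl_unlock();
 */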
/*
 *	Perform the SIOCxIFxxx calls, inside rcu_read_lock()
 */
static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);

	if (!dev)
		return -ENODEV;

	switch (cmd) {
	case SIOCGIFFLAGS:	/* Get interface flags */
		ifr->ifr_flags = (short) dev_get_flags(dev);
		return 0;

	case SIOCGIFMETRIC:	/* Get the metric on the interface
				   (currently unused) */
		ifr->ifr_metric = 0;
		return 0;

	case SIOCGIFMTU:	/* Get the MTU of a device */
		ifr->ifr_mtu = dev->mtu;
		return 0;

	case SIOCGIFHWADDR:
		if (!dev->addr_len)
			memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
		else
			memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
			       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		ifr->ifr_hwaddr.sa_family = dev->type;
		return 0;

	case SIOCGIFSLAVE:
		err = -EINVAL;
		break;

	case SIOCGIFMAP:
		ifr->ifr_map.mem_start = dev->mem_start;
		ifr->ifr_map.mem_end   = dev->mem_end;
		ifr->ifr_map.base_addr = dev->base_addr;
		ifr->ifr_map.irq       = dev->irq;
		ifr->ifr_map.dma       = dev->dma;
		ifr->ifr_map.port      = dev->if_port;
		return 0;

	case SIOCGIFINDEX:
		ifr->ifr_ifindex = dev->ifindex;
		return 0;

	case SIOCGIFTXQLEN:
		ifr->ifr_qlen = dev->tx_queue_len;
		return 0;

	default:
		/* dev_ioctl() should ensure this case
		 * is never reached
		 */
		WARN_ON(1);
		err = -ENOTTY;
		break;

	}
	return err;
}
/*
 *	Perform the SIOCxIFxxx calls, inside rtnl_lock()
 */
static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
{
	int err;
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	const struct net_device_ops *ops;

	if (!dev)
		return -ENODEV;

	ops = dev->netdev_ops;

	switch (cmd) {
	case SIOCSIFFLAGS:	/* Set interface flags */
		return dev_change_flags(dev, ifr->ifr_flags);

	case SIOCSIFMETRIC:	/* Set the metric on the interface
				   (currently unused) */
		return -EOPNOTSUPP;

	case SIOCSIFMTU:	/* Set the MTU of a device */
		return dev_set_mtu(dev, ifr->ifr_mtu);

	case SIOCSIFHWADDR:
		return dev_set_mac_address(dev, &ifr->ifr_hwaddr);

	case SIOCSIFHWBROADCAST:
		if (ifr->ifr_hwaddr.sa_family != dev->type)
			return -EINVAL;
		memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
		       min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
		return 0;

	case SIOCSIFMAP:
		if (ops->ndo_set_config) {
			if (!netif_device_present(dev))
				return -ENODEV;
			return ops->ndo_set_config(dev, &ifr->ifr_map);
		}
		return -EOPNOTSUPP;

	case SIOCADDMULTI:
		if (!ops->ndo_set_rx_mode ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCDELMULTI:
		if (!ops->ndo_set_rx_mode ||
		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
			return -EINVAL;
		if (!netif_device_present(dev))
			return -ENODEV;
		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);

	case SIOCSIFTXQLEN:
		if (ifr->ifr_qlen < 0)
			return -EINVAL;
		dev->tx_queue_len = ifr->ifr_qlen;
		return 0;

	case SIOCSIFNAME:
		ifr->ifr_newname[IFNAMSIZ-1] = '\0';
		return dev_change_name(dev, ifr->ifr_newname);

	case SIOCSHWTSTAMP:
		err = net_hwtstamp_validate(ifr);
		if (err)
			return err;
		/* fall through */

	/*
	 *	Unknown or private ioctl
	 */
	default:
		if ((cmd >= SIOCDEVPRIVATE &&
		    cmd <= SIOCDEVPRIVATE + 15) ||
		    cmd == SIOCBONDENSLAVE ||
		    cmd == SIOCBONDRELEASE ||
		    cmd == SIOCBONDSETHWADDR ||
		    cmd == SIOCBONDSLAVEINFOQUERY ||
		    cmd == SIOCBONDINFOQUERY ||
		    cmd == SIOCBONDCHANGEACTIVE ||
		    cmd == SIOCGMIIPHY ||
		    cmd == SIOCGMIIREG ||
		    cmd == SIOCSMIIREG ||
		    cmd == SIOCBRADDIF ||
		    cmd == SIOCBRDELIF ||
		    cmd == SIOCSHWTSTAMP ||
		    cmd == SIOCWANDEV) {
			err = -EOPNOTSUPP;
			if (ops->ndo_do_ioctl) {
				if (netif_device_present(dev))
					err = ops->ndo_do_ioctl(dev, ifr, cmd);
				else
					err = -ENODEV;
			}
		} else
			err = -EINVAL;

	}
	return err;
}
/*
 *	This function handles all "interface"-type I/O control requests. The actual
 *	'doing' part of this is dev_ifsioc above.
 */

/**
 *	dev_ioctl	-	network device ioctl
 *	@net: the applicable net namespace
 *	@cmd: command to issue
 *	@arg: pointer to a struct ifreq in user space
 *
 *	Issue ioctl functions to devices. This is normally called by the
 *	user space syscall interfaces but can sometimes be useful for
 *	other purposes. The return value is the return from the syscall if
 *	positive or a negative errno code on error.
 */

int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct ifreq ifr;
	int ret;
	char *colon;

	/* One special case: SIOCGIFCONF takes ifconf argument
	   and requires shared lock, because it sleeps writing
	   to user space.
	 */

	if (cmd == SIOCGIFCONF) {
		rtnl_lock();
		ret = dev_ifconf(net, (char __user *) arg);
		rtnl_unlock();
		return ret;
	}
	if (cmd == SIOCGIFNAME)
		return dev_ifname(net, (struct ifreq __user *)arg);

	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
		return -EFAULT;

	ifr.ifr_name[IFNAMSIZ-1] = 0;

	colon = strchr(ifr.ifr_name, ':');
	if (colon)
		*colon = 0;

	/*
	 *	See which interface the caller is talking about.
	 */

	switch (cmd) {
	/*
	 *	These ioctl calls:
	 *	- can be done by all.
	 *	- atomic and do not require locking.
	 *	- return a value
	 */
	case SIOCGIFFLAGS:
	case SIOCGIFMETRIC:
	case SIOCGIFMTU:
	case SIOCGIFHWADDR:
	case SIOCGIFSLAVE:
	case SIOCGIFMAP:
	case SIOCGIFINDEX:
	case SIOCGIFTXQLEN:
		dev_load(net, ifr.ifr_name);
		rcu_read_lock();
		ret = dev_ifsioc_locked(net, &ifr, cmd);
		rcu_read_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	case SIOCETHTOOL:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ethtool(net, &ifr);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- return a value
	 */
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSIFNAME:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		if (!ret) {
			if (colon)
				*colon = ':';
			if (copy_to_user(arg, &ifr,
					 sizeof(struct ifreq)))
				ret = -EFAULT;
		}
		return ret;

	/*
	 *	These ioctl calls:
	 *	- require superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFMAP:
	case SIOCSIFTXQLEN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	/*
	 *	These ioctl calls:
	 *	- require local superuser power.
	 *	- require strict serialization.
	 *	- do not return a value
	 */
	case SIOCSIFFLAGS:
	case SIOCSIFMETRIC:
	case SIOCSIFMTU:
	case SIOCSIFHWADDR:
	case SIOCSIFSLAVE:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
	case SIOCSIFHWBROADCAST:
	case SIOCSMIIREG:
	case SIOCBONDENSLAVE:
	case SIOCBONDRELEASE:
	case SIOCBONDSETHWADDR:
	case SIOCBONDCHANGEACTIVE:
	case SIOCBRADDIF:
	case SIOCBRDELIF:
	case SIOCSHWTSTAMP:
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		/* fall through */
	case SIOCBONDSLAVEINFOQUERY:
	case SIOCBONDINFOQUERY:
		dev_load(net, ifr.ifr_name);
		rtnl_lock();
		ret = dev_ifsioc(net, &ifr, cmd);
		rtnl_unlock();
		return ret;

	case SIOCGIFMEM:
		/* Get the per device memory space. We can add this but
		 * currently do not support it */
	case SIOCSIFMEM:
		/* Set the per device memory buffer space.
		 * Not applicable in our case */
	case SIOCSIFLINK:
		return -ENOTTY;

	/*
	 *	Unknown or private ioctl.
	 */
	default:
		if (cmd == SIOCWANDEV ||
		    (cmd >= SIOCDEVPRIVATE &&
		     cmd <= SIOCDEVPRIVATE + 15)) {
			dev_load(net, ifr.ifr_name);
			rtnl_lock();
			ret = dev_ifsioc(net, &ifr, cmd);
			rtnl_unlock();
			if (!ret && copy_to_user(arg, &ifr,
						 sizeof(struct ifreq)))
				ret = -EFAULT;
			return ret;
		}
		/* Take care of Wireless Extensions */
		if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
			return wext_handle_ioctl(net, &ifr, cmd, arg);
		return -ENOTTY;
	}
}
/**
 *	dev_new_index	-	allocate an ifindex
 *	@net: the applicable net namespace
 *
 *	Returns a suitable unique value for a new device interface
 *	number. The caller must hold the rtnl semaphore or the
 *	dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
}
static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	dev_close_many(head);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		/* Shutdown queueing discipline. */
		dev_shutdown(dev);


		/* Notify protocols, that we are about to destroy
		   this device. They should clean all the things.
		*/
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

		/*
		 *	Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		/* Notifier chain MUST detach us from master device. */
		WARN_ON(dev->master);

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}
static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* Fix illegal SG+CSUM combinations. */
	if ((features & NETIF_F_SG) &&
	    !(features & NETIF_F_ALL_CSUM)) {
		netdev_dbg(dev,
			"Dropping NETIF_F_SG since no checksum feature.\n");
		features &= ~NETIF_F_SG;
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				"Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

	return features;
}
int __netdev_update_features(struct net_device *dev)
{
	netdev_features_t features;
	int err = 0;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
		return 0;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		&dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	if (unlikely(err < 0)) {
		netdev_err(dev,
			"set_features() failed (%d); wanted %pNF, left %pNF\n",
			err, &features, &dev->features);
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}
/**
 *	netdev_update_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications if it
 *	has changed. Should be called after driver or hardware dependent
 *	conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
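
/*
 * Example (a minimal sketch; priv->tso_broken is a hypothetical driver
 * field): after a condition that the driver's ndo_fix_features depends on
 * changes, re-evaluate the feature set under the rtnl lock:
 *
 *	rtnl_lock();
 *	priv->tso_broken = true;
 *	netdev_update_features(dev);
 *	rtnl_unlock();
 */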
/**
 *	netdev_change_features - recalculate device features
 *	@dev: the device to check
 *
 *	Recalculate dev->features set and send notifications even
 *	if they have not changed. Should be called instead of
 *	netdev_update_features() if also dev->vlan_features might
 *	have changed to allow the changes to be propagated to stacked
 *	VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
5605 * netif_stacked_transfer_operstate - transfer operstate
5606 * @rootdev: the root or lower level device to transfer state from
5607 * @dev: the device to transfer operstate to
5609 * Transfer operational state from root to device. This is normally
5610 * called when a stacking relationship exists between the root
5611 * device and the device(a leaf device).
5613 void netif_stacked_transfer_operstate(const struct net_device
*rootdev
,
5614 struct net_device
*dev
)
5616 if (rootdev
->operstate
== IF_OPER_DORMANT
)
5617 netif_dormant_on(dev
);
5619 netif_dormant_off(dev
);
5621 if (netif_carrier_ok(rootdev
)) {
5622 if (!netif_carrier_ok(dev
))
5623 netif_carrier_on(dev
);
5625 if (netif_carrier_ok(dev
))
5626 netif_carrier_off(dev
);
5629 EXPORT_SYMBOL(netif_stacked_transfer_operstate
);
#ifdef CONFIG_RPS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;

	BUG_ON(count < 1);

	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx) {
		pr_err("netdev: Unable to allocate %u rx queues\n", count);
		return -ENOMEM;
	}
	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif
static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}
static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;

	BUG_ON(count < 1);

	tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
	if (!tx) {
		pr_err("netdev: Unable to allocate %u tx queues\n", count);
		return -ENOMEM;
	}
	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}
/**
 *	register_netdevice	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	register_netdev() instead of this.
 *
 *	BUGS:
 *	The locking appears insufficient to guarantee two parallel registers
 *	will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	dev->iflink = -1;

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	/* Turn on no cache copy if HW is doing checksum */
	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
		if (dev->features & NETIF_F_ALL_CSUM) {
			dev->wanted_features |= NETIF_F_NOCACHE_COPY;
			dev->features |= NETIF_F_NOCACHE_COPY;
		}
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 *	Default initial state at registry is that the
	 *	device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);
/**
 *	init_dummy_netdev	- init a dummy network device for NAPI
 *	@dev: device to init
 *
 *	This takes a network device structure and initializes the minimum
 *	amount of fields so it can be used to schedule NAPI polls without
 *	registering a full blown interface. This is to be used by drivers
 *	that need to tie several hardware interfaces to a single NAPI
 *	poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We dont allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' dont need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
/**
 *	register_netdev	- register a network device
 *	@dev: device to register
 *
 *	Take a completed network device structure and add it to the kernel
 *	interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 *	chain. 0 is returned on success. A negative errno code is returned
 *	on a failure to set up the device, or if the name is a duplicate.
 *
 *	This is a wrapper around register_netdevice that takes the rtnl semaphore
 *	and expands the device name if you passed a format string to
 *	alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
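
/*
 * Example (a minimal sketch; my_priv and my_setup are hypothetical): the
 * usual probe-time pattern. "my%d" is expanded to a free name by
 * register_netdev(), and free_netdev() is the error-path counterpart:
 *
 *	dev = alloc_netdev_mqs(sizeof(struct my_priv), "my%d", my_setup, 1, 1);
 *	if (!dev)
 *		return -ENOMEM;
 *	err = register_netdev(dev);
 *	if (err) {
 *		free_netdev(dev);
 *		return err;
 *	}
 */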
int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);
/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}
/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		rtnl_lock();
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
		__rtnl_unlock();

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}
/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
/**
 *	dev_get_stats	- get network device statistics
 *	@dev: device to get statistics from
 *	@storage: place to store stats
 *
 *	Get network statistics from device. Return @storage.
 *	The device driver may provide its own method by setting
 *	dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 *	otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
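
/*
 * Example (a minimal sketch): readers pass their own storage; the helper
 * fills it from whichever method the driver provides:
 *
 *	struct rtnl_link_stats64 stats;
 *
 *	dev_get_stats(dev, &stats);
 *	netdev_info(dev, "rx %llu tx %llu packets\n",
 *		    (unsigned long long)stats.rx_packets,
 *		    (unsigned long long)stats.tx_packets);
 */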
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;
/**
 *	alloc_netdev_mqs - allocate network device
 *	@sizeof_priv:	size of private data to allocate space for
 *	@name:		device name format string
 *	@setup:		callback to initialize device
 *	@txqs:		the number of TX subqueues to allocate
 *	@rxqs:		the number of RX subqueues to allocate
 *
 *	Allocates a struct net_device with private data area for driver use
 *	and performs basic initialization. Also allocates subqueue structs
 *	for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_RPS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		pr_err("alloc_netdev: Unable to allocate device\n");
		return NULL;
	}

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_p;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	dev->priv_flags = IFF_XMIT_DST_RELEASE;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_RPS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

free_p:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
/**
 *	free_netdev - free network device
 *	@dev: device
 *
 *	This function does the last stage of destroying an allocated device
 *	interface. The reference to the device object is released.
 *	If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	kfree(dev->_tx);
#ifdef CONFIG_RPS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)dev - dev->padded);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);
/**
 *	synchronize_net -  Synchronize with packet receive processing
 *
 *	Wait for packets currently being received to be done.
 *	Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
/**
 *	unregister_netdevice_queue - remove device from the kernel
 *	@dev: device
 *	@head: list
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *	If head not NULL, device is queued to be unregistered later.
 *
 *	Callers must hold the rtnl semaphore. You may want
 *	unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);
/**
 *	unregister_netdevice_many - unregister many devices
 *	@head: list of devices
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
/**
 *	unregister_netdev - remove device from the kernel
 *	@dev: device
 *
 *	This function shuts down a device interface and removes it
 *	from the kernel tables.
 *
 *	This is just a wrapper for unregister_netdevice that takes
 *	the rtnl semaphore. In general you want to use this and not
 *	unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
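
/*
 * Example (a minimal sketch): teardown mirrors the probe pattern;
 * unregister first (this waits for outstanding references via the todo
 * list), then release the memory:
 *
 *	unregister_netdev(dev);
 *	free_netdev(dev);
 */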
/**
 *	dev_change_net_namespace - move device to a different network namespace
 *	@dev: device
 *	@net: network namespace
 *	@pat: If not NULL name pattern to try if the current device name
 *	      is already taken in the destination network namespace.
 *
 *	This function shuts down a device interface and moves it
 *	to a new network namespace. On success 0 is returned, on
 *	a failure a negative errno code is returned.
 *
 *	Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing todo */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = 0;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	*/
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);

	/*
	 *	Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 *	Prevent userspace races by waiting until the network
	 *	device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all. Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
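
/*
 * Example (a minimal sketch; the slave list is hypothetical): a master
 * device such as a bond folds each slave's feature set into its own,
 * never enabling anything that is off in mask:
 *
 *	netdev_features_t features = mask;
 *
 *	list_for_each_entry(slave, &slaves, list)
 *		features = netdev_increment_features(features,
 *						     slave->dev->features,
 *						     mask);
 */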
static struct hlist_head *netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
static int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent) {
		r = dev_printk_emit(level[1] - '0',
				    dev->dev.parent,
				    "%s %s %s: %pV",
				    dev_driver_string(dev->dev.parent),
				    dev_name(dev->dev.parent),
				    netdev_name(dev), vaf);
	} else if (dev) {
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	} else {
		r = printk("%s(NULL net_device): %pV", level, vaf);
	}

	return r;
}
int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)		\
int func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	int r;							\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	r = __netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
								\
	return r;						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
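
/*
 * Example (a minimal sketch): the generated helpers prefix each message
 * with the driver, bus and interface name automatically:
 *
 *	netdev_err(dev, "TX timeout on queue %d\n", queue_index);
 *	netdev_info(dev, "link up\n");
 */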
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	list_del(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *       This is called single threaded during boot, so no need
 *       to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		memset(sd, 0, sizeof(*sd));
		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		sd->completion_queue = NULL;
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->csd.flags = 0;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device is
	 * present in a network namespace, the loopback device must be
	 * present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by keeping
	 * the loopback device as the first device on the list of network
	 * devices, so that it is the first device that appears and the
	 * last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);

static int __init initialize_hashrnd(void)
{
	get_random_bytes(&hashrnd, sizeof(hashrnd));
	return 0;
}

late_initcall_sync(initialize_hashrnd);