/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell :	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>

#include "net-sysfs.h"
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);
/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock().
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
	spin_lock(&sd->input_pkt_queue.lock);
}

static inline void rps_unlock(struct softnet_data *sd)
{
	spin_unlock(&sd->input_pkt_queue.lock);
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif
/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers that mangle input packets
 *	MUST BE last in the hash buckets, and checking of protocol handlers
 *	MUST start from the promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation: if a protocol handler that mangles packets were
 *	first on the list, it could not sense that the packet is cloned
 *	and should be copied-on-write, so it would change the data and
 *	subsequent readers would get a broken packet.
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}
/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */
void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt1 == pt) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
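
/* Illustrative usage sketch (not taken from this file): a module that wants a
 * tap on all traffic typically pairs these calls in its init/exit paths. The
 * handler name below is hypothetical.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev);
 *
 *	static struct packet_type my_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_packet_type);		(module init)
 *	dev_remove_pack(&my_packet_type);	(module exit)
 */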
/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it can not guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po1 == po) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
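
/* Illustrative usage sketch (not taken from this file): GSO/GRO handlers for
 * a protocol are registered the same way; the callback names below are
 * hypothetical placeholders.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment	= my_gso_segment,
 *			.gro_receive	= my_gro_receive,
 *			.gro_complete	= my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 */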
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);

/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
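
/* Example of the boot parameter this handler parses. Given the get_options()
 * call above, the kernel command line takes the form
 *
 *	netdev=irq,io,mem_start,mem_end,name
 *
 * e.g. "netdev=5,0x300,0,0,eth0" (values are purely illustrative); the numeric
 * settings are stored in an ifmap slot keyed by the trailing name and picked
 * up later by netdev_boot_setup_check() during device probing.
 */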
/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */
int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under the RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
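
/* Illustrative usage sketch (not taken from this file): the lookup flavours
 * trade locking for refcounting; "eth0" is just an example name.
 *
 *	dev = dev_get_by_name(net, "eth0");	(takes a reference)
 *	if (dev) {
 *		...
 *		dev_put(dev);			(caller must release it)
 *	}
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");	(no reference taken)
 *	...					(only valid inside the RCU section)
 *	rcu_read_unlock();
 */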
/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold the RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);

/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);
/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
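
/* Examples (illustrative, derived from the checks above): "eth0", "wlan-guest"
 * and "bond_0" are accepted; "", ".", "..", "a/b", "veth:1", "my if" (embedded
 * whitespace) and any name of IFNAMSIZ characters or more are rejected.
 */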
/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
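
/* Illustrative usage sketch (not taken from this file): a driver that wants
 * automatic numbering hands in a format string; "dummy%d" is an example.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto fail;
 *	(dev->name is now e.g. "dummy0" and err holds the unit number)
 */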
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d".
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}
/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return 0;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);
static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret) {
		clear_bit(__LINK_STATE_START, &dev->state);
	} else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of it's
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

int dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}

	return 0;
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}
static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
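
/* Illustrative usage sketch (not taken from this file): a subsystem that wants
 * to track device events registers a notifier_block; the callback name below
 * is hypothetical.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_netdev_event };
 *	register_netdevice_notifier(&my_nb);
 */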
/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);
static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)				\
	if (static_key_false(&netstamp_needed)) {		\
		if ((COND) && !(SKB)->tstamp.tv64)		\
			__net_timestamp(SKB);			\
	}							\
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->protocol = eth_type_trans(skb, dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
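
/* Illustrative usage sketch (not taken from this file): a virtual device pair
 * (veth-like) can hand frames to its peer from its ndo_start_xmit; "get_peer"
 * is a hypothetical helper.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */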
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}
/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid nothing can be done so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}
static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;

			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}
	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);
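
/* Illustrative usage sketch (not taken from this file): a multiqueue driver
 * can pin transmit queue 0 to the CPUs of NUMA node 0; the mask handling
 * below is an example, not a real driver.
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_copy(mask, cpumask_of_node(0));
 *		netif_set_xps_queue(dev, mask, 0);
 *		free_cpumask_var(mask);
 *	}
 */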
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
			netif_reset_xps_queues_gt(dev, txq);
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}
void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(txq->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_wake_subqueue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);
2273 void __dev_kfree_skb_irq(struct sk_buff
*skb
, enum skb_free_reason reason
)
2275 unsigned long flags
;
2277 if (likely(atomic_read(&skb
->users
) == 1)) {
2279 atomic_set(&skb
->users
, 0);
2280 } else if (likely(!atomic_dec_and_test(&skb
->users
))) {
2283 get_kfree_skb_cb(skb
)->reason
= reason
;
2284 local_irq_save(flags
);
2285 skb
->next
= __this_cpu_read(softnet_data
.completion_queue
);
2286 __this_cpu_write(softnet_data
.completion_queue
, skb
);
2287 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
2288 local_irq_restore(flags
);
2290 EXPORT_SYMBOL(__dev_kfree_skb_irq
);
2292 void __dev_kfree_skb_any(struct sk_buff
*skb
, enum skb_free_reason reason
)
2294 if (in_irq() || irqs_disabled())
2295 __dev_kfree_skb_irq(skb
, reason
);
2299 EXPORT_SYMBOL(__dev_kfree_skb_any
);
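
/*
 * Illustrative sketch (assumption, not part of this file): TX completion code
 * that may run in hard-irq or process context can use the _any variants
 * instead of picking a context-specific helper.  "foo_clean_tx_irq" and the
 * ring accessor are hypothetical.
 *
 *	static void foo_clean_tx_irq(struct foo_ring *ring)
 *	{
 *		struct sk_buff *skb = foo_fetch_completed_skb(ring);
 *
 *		if (skb)
 *			dev_consume_skb_any(skb);	// freed, not dropped
 *	}
 */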
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from the system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart it if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
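
/*
 * Illustrative sketch (assumption, not part of this file): a driver's
 * power-management hooks typically bracket suspend/resume with these calls.
 * "foo_suspend", "foo_resume" and the hardware helpers are hypothetical.
 *
 *	static int foo_suspend(struct device *d)
 *	{
 *		struct net_device *netdev = dev_get_drvdata(d);
 *
 *		netif_device_detach(netdev);
 *		foo_hw_stop(netdev);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *d)
 *	{
 *		struct net_device *netdev = dev_get_drvdata(d);
 *
 *		foo_hw_start(netdev);
 *		netif_device_attach(netdev);
 *		return 0;
 *	}
 */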
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}

/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		skb_warn_bad_offload(skb);

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
EXPORT_SYMBOL(__skb_gso_segment);
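
/*
 * Illustrative sketch (assumption, not part of this file): a caller that must
 * software-segment a GSO skb normally uses the skb_gso_segment() wrapper and
 * walks the returned list.  "foo_xmit_one" is a hypothetical transmit helper.
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	while (segs) {
 *		struct sk_buff *next = segs->next;
 *
 *		segs->next = NULL;
 *		foo_xmit_one(segs);
 *		segs = next;
 *	}
 */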
/* Take action when hardware reception checksum errors are detected. */
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~NETIF_F_ALL_CSUM;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(const struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
		features &= ~NETIF_F_GSO_MASK;

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}

static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (!(features & NETIF_F_ALL_CSUM) &&
			    skb_checksum_help(skb))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}

static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC___STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10
/**
 * dev_loopback_xmit - loop back @skb
 * @skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shoot the lock. It is not prone to deadlocks.
	   Either shoot the noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev);
			if (!skb)
				goto drop;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
drop:
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
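
/*
 * Illustrative sketch (assumption, not part of this file): a virtual device
 * or tunnel that builds its own frames hands them to the stack like so.
 * "foo_build_frame" is hypothetical and error handling is simplified.
 *
 *	skb->dev = target_dev;
 *	skb->priority = prio;
 *	foo_build_frame(skb);
 *	err = dev_queue_xmit(skb);
 *	// dev_queue_xmit() always consumes the skb, even on error,
 *	// so the caller must not free it again.
 */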
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match */
		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
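
/*
 * Illustrative sketch (assumption, not part of this file): a driver using
 * accelerated RFS typically scans its filter table from a periodic job and
 * asks the stack whether each filter is still wanted.  "foo_rfs_filter",
 * "foo_remove_filter" and the table layout are hypothetical.
 *
 *	for (i = 0; i < adapter->num_rfs_filters; i++) {
 *		struct foo_rfs_filter *f = &adapter->rfs_filters[i];
 *
 *		if (f->in_use &&
 *		    rps_may_expire_flow(adapter->netdev, f->rxq_index,
 *					f->flow_id, i))
 *			foo_remove_filter(adapter, f);
 *	}
 */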
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}

/**
 *	netif_rx - post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 */
int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);
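
/*
 * Illustrative sketch (assumption, not part of this file): a simple non-NAPI
 * driver hands a received frame to the stack from its interrupt handler.
 * "rx_buf" and "len" are hypothetical driver-side values.
 *
 *	skb = netdev_alloc_skb_ip_align(netdev, len);
 *	if (!skb)
 *		return;		// frame dropped, counted by the driver
 *	memcpy(skb_put(skb, len), rx_buf, len);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_rx(skb);		// always consumes the skb
 */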
int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_atomic();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_atomic();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? otherwise some useless instructions
 * a compare and 2 stores extra right now if we don't have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rcu_dereference(rxq->qdisc);
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
#endif

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
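
/*
 * Illustrative sketch (assumption, not part of this file): a bridge-like
 * upper device claims a port's ingress traffic by registering an rx_handler.
 * "foo_handle_frame" and "foo_port" are hypothetical.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct foo_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		// ... steer the skb to the upper device ...
 *		return RX_HANDLER_CONSUMED;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(port_dev, foo_handle_frame, port);
 *	rtnl_unlock();
 */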
/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		if (skb_vlan_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

static int netif_receive_skb_internal(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		rcu_read_lock();
		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			int ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);

/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}

/* napi->gro_list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->udp_mark = 0;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
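
/*
 * Illustrative sketch (assumption, not part of this file): a NAPI driver's
 * poll routine normally feeds received frames through GRO instead of calling
 * netif_receive_skb() directly.  "ring" is a hypothetical driver structure.
 *
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	skb_record_rx_queue(skb, ring->queue_index);
 *	napi_gro_receive(&ring->napi, skb);
 */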
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->dev = napi->dev;
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb)
			napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				smp_call_function_single_async(remsd->cpu,
							       &remsd->csd);
			remsd = next;
		}
	} else
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
	return sd->rps_ipi_list != NULL;
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

	/* Check if we have pending ipi, it's better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = weight_p;
	local_irq_disable();
	while (1) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			break;
		}

		skb_queue_splice_tail_init(&sd->input_pkt_queue,
					   &sd->process_queue);
	}
	local_irq_enable();

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));

	list_del_init(&n->poll_list);
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (likely(list_empty(&n->poll_list))) {
		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
	} else {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		__napi_complete(n);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we don't take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
EXPORT_SYMBOL_GPL(napi_by_id);

void napi_hash_add(struct napi_struct *napi)
{
	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {

		spin_lock(&napi_hash_lock);

		/* 0 is not a valid id, we also skip an id that is taken
		 * we expect both events to be extremely rare
		 */
		napi->napi_id = 0;
		while (!napi->napi_id) {
			napi->napi_id = ++napi_gen_id;
			if (napi_by_id(napi->napi_id))
				napi->napi_id = 0;
		}

		hlist_add_head_rcu(&napi->napi_hash_node,
				   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

		spin_unlock(&napi_hash_lock);
	}
}
EXPORT_SYMBOL_GPL(napi_hash_add);

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi)
{
	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
		hlist_del_rcu(&napi->napi_hash_node);

	spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);
	napi_schedule(napi);

	return HRTIMER_NORESTART;
}
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
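
/*
 * Illustrative sketch (assumption, not part of this file): the usual NAPI
 * driver pattern built on the helpers above.  All "foo_*" names are
 * hypothetical.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_adapter *adapter =
 *			container_of(napi, struct foo_adapter, napi);
 *		int done = foo_clean_rx(adapter, budget); // calls napi_gro_receive()
 *
 *		if (done < budget) {
 *			napi_complete_done(napi, done);
 *			foo_enable_rx_irq(adapter);
 *		}
 *		return done;
 *	}
 *
 *	static irqreturn_t foo_intr(int irq, void *data)
 *	{
 *		struct foo_adapter *adapter = data;
 *
 *		foo_disable_rx_irq(adapter);
 *		napi_schedule(&adapter->napi);
 *		return IRQ_HANDLED;
 *	}
 *
 *	// at probe time:
 *	netif_napi_add(netdev, &adapter->napi, foo_poll, NAPI_POLL_WEIGHT);
 */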
void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

void netif_napi_del(struct napi_struct *napi)
{
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				return;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
}
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->all_adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);
4801 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4803 * @iter: list_head ** of the current position
4805 * Gets the next device from the dev's upper list, starting from iter
4806 * position. The caller must hold RCU read lock.
4808 struct net_device
*netdev_upper_get_next_dev_rcu(struct net_device
*dev
,
4809 struct list_head
**iter
)
4811 struct netdev_adjacent
*upper
;
4813 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4815 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
4817 if (&upper
->list
== &dev
->adj_list
.upper
)
4820 *iter
= &upper
->list
;
4824 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu
);
4827 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4829 * @iter: list_head ** of the current position
4831 * Gets the next device from the dev's upper list, starting from iter
4832 * position. The caller must hold RCU read lock.
4834 struct net_device
*netdev_all_upper_get_next_dev_rcu(struct net_device
*dev
,
4835 struct list_head
**iter
)
4837 struct netdev_adjacent
*upper
;
4839 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4841 upper
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
4843 if (&upper
->list
== &dev
->all_adj_list
.upper
)
4846 *iter
= &upper
->list
;
4850 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu
);
/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold either the RTNL
 * lock or its own locking that guarantees that the neighbour lower list
 * will remain unchanged.
 */
4863 void *netdev_lower_get_next_private(struct net_device
*dev
,
4864 struct list_head
**iter
)
4866 struct netdev_adjacent
*lower
;
4868 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
4870 if (&lower
->list
== &dev
->adj_list
.lower
)
4873 *iter
= lower
->list
.next
;
4875 return lower
->private;
4877 EXPORT_SYMBOL(netdev_lower_get_next_private
);
4880 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4881 * lower neighbour list, RCU
4884 * @iter: list_head ** of the current position
4886 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4887 * list, starting from iter position. The caller must hold RCU read lock.
4889 void *netdev_lower_get_next_private_rcu(struct net_device
*dev
,
4890 struct list_head
**iter
)
4892 struct netdev_adjacent
*lower
;
4894 WARN_ON_ONCE(!rcu_read_lock_held());
4896 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
4898 if (&lower
->list
== &dev
->adj_list
.lower
)
4901 *iter
= &lower
->list
;
4903 return lower
->private;
4905 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu
);
/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour list,
 * starting from iter position. The caller must hold the RTNL lock or its
 * own locking that guarantees that the neighbour lower list will remain
 * unchanged.
 */
4918 void *netdev_lower_get_next(struct net_device
*dev
, struct list_head
**iter
)
4920 struct netdev_adjacent
*lower
;
4922 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
4924 if (&lower
->list
== &dev
->adj_list
.lower
)
4927 *iter
= &lower
->list
;
4931 EXPORT_SYMBOL(netdev_lower_get_next
);
4934 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4935 * lower neighbour list, RCU
4939 * Gets the first netdev_adjacent->private from the dev's lower neighbour
4940 * list. The caller must hold RCU read lock.
4942 void *netdev_lower_get_first_private_rcu(struct net_device
*dev
)
4944 struct netdev_adjacent
*lower
;
4946 lower
= list_first_or_null_rcu(&dev
->adj_list
.lower
,
4947 struct netdev_adjacent
, list
);
4949 return lower
->private;
4952 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu
);
4955 * netdev_master_upper_dev_get_rcu - Get master upper device
4958 * Find a master upper device and return pointer to it or NULL in case
4959 * it's not there. The caller must hold the RCU read lock.
4961 struct net_device
*netdev_master_upper_dev_get_rcu(struct net_device
*dev
)
4963 struct netdev_adjacent
*upper
;
4965 upper
= list_first_or_null_rcu(&dev
->adj_list
.upper
,
4966 struct netdev_adjacent
, list
);
4967 if (upper
&& likely(upper
->master
))
4971 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu
);
static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}
4993 static inline bool netdev_adjacent_is_neigh_list(struct net_device
*dev
,
4994 struct net_device
*adj_dev
,
4995 struct list_head
*dev_list
)
4997 return (dev_list
== &dev
->adj_list
.upper
||
4998 dev_list
== &dev
->adj_list
.lower
) &&
4999 net_eq(dev_net(dev
), dev_net(adj_dev
));
5002 static int __netdev_adjacent_dev_insert(struct net_device
*dev
,
5003 struct net_device
*adj_dev
,
5004 struct list_head
*dev_list
,
5005 void *private, bool master
)
5007 struct netdev_adjacent
*adj
;
5010 adj
= __netdev_find_adj(dev
, adj_dev
, dev_list
);
5017 adj
= kmalloc(sizeof(*adj
), GFP_KERNEL
);
5022 adj
->master
= master
;
5024 adj
->private = private;
5027 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5028 adj_dev
->name
, dev
->name
, adj_dev
->name
);
5030 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
)) {
5031 ret
= netdev_adjacent_sysfs_add(dev
, adj_dev
, dev_list
);
5036 /* Ensure that master link is always the first item in list. */
5038 ret
= sysfs_create_link(&(dev
->dev
.kobj
),
5039 &(adj_dev
->dev
.kobj
), "master");
5041 goto remove_symlinks
;
5043 list_add_rcu(&adj
->list
, dev_list
);
5045 list_add_tail_rcu(&adj
->list
, dev_list
);
5051 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
5052 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
5060 static void __netdev_adjacent_dev_remove(struct net_device
*dev
,
5061 struct net_device
*adj_dev
,
5062 struct list_head
*dev_list
)
5064 struct netdev_adjacent
*adj
;
5066 adj
= __netdev_find_adj(dev
, adj_dev
, dev_list
);
5069 pr_err("tried to remove device %s from %s\n",
5070 dev
->name
, adj_dev
->name
);
5074 if (adj
->ref_nr
> 1) {
5075 pr_debug("%s to %s ref_nr-- = %d\n", dev
->name
, adj_dev
->name
,
5082 sysfs_remove_link(&(dev
->dev
.kobj
), "master");
5084 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
5085 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
5087 list_del_rcu(&adj
->list
);
5088 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5089 adj_dev
->name
, dev
->name
, adj_dev
->name
);
5091 kfree_rcu(adj
, rcu
);
5094 static int __netdev_adjacent_dev_link_lists(struct net_device
*dev
,
5095 struct net_device
*upper_dev
,
5096 struct list_head
*up_list
,
5097 struct list_head
*down_list
,
5098 void *private, bool master
)
5102 ret
= __netdev_adjacent_dev_insert(dev
, upper_dev
, up_list
, private,
5107 ret
= __netdev_adjacent_dev_insert(upper_dev
, dev
, down_list
, private,
5110 __netdev_adjacent_dev_remove(dev
, upper_dev
, up_list
);
5117 static int __netdev_adjacent_dev_link(struct net_device
*dev
,
5118 struct net_device
*upper_dev
)
5120 return __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
5121 &dev
->all_adj_list
.upper
,
5122 &upper_dev
->all_adj_list
.lower
,
5126 static void __netdev_adjacent_dev_unlink_lists(struct net_device
*dev
,
5127 struct net_device
*upper_dev
,
5128 struct list_head
*up_list
,
5129 struct list_head
*down_list
)
5131 __netdev_adjacent_dev_remove(dev
, upper_dev
, up_list
);
5132 __netdev_adjacent_dev_remove(upper_dev
, dev
, down_list
);
5135 static void __netdev_adjacent_dev_unlink(struct net_device
*dev
,
5136 struct net_device
*upper_dev
)
5138 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
,
5139 &dev
->all_adj_list
.upper
,
5140 &upper_dev
->all_adj_list
.lower
);
5143 static int __netdev_adjacent_dev_link_neighbour(struct net_device
*dev
,
5144 struct net_device
*upper_dev
,
5145 void *private, bool master
)
5147 int ret
= __netdev_adjacent_dev_link(dev
, upper_dev
);
5152 ret
= __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
5153 &dev
->adj_list
.upper
,
5154 &upper_dev
->adj_list
.lower
,
5157 __netdev_adjacent_dev_unlink(dev
, upper_dev
);
5164 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device
*dev
,
5165 struct net_device
*upper_dev
)
5167 __netdev_adjacent_dev_unlink(dev
, upper_dev
);
5168 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
,
5169 &dev
->adj_list
.upper
,
5170 &upper_dev
->adj_list
.lower
);
5173 static int __netdev_upper_dev_link(struct net_device
*dev
,
5174 struct net_device
*upper_dev
, bool master
,
5177 struct netdev_adjacent
*i
, *j
, *to_i
, *to_j
;
5182 if (dev
== upper_dev
)
5185 /* To prevent loops, check if dev is not upper device to upper_dev. */
5186 if (__netdev_find_adj(upper_dev
, dev
, &upper_dev
->all_adj_list
.upper
))
5189 if (__netdev_find_adj(dev
, upper_dev
, &dev
->all_adj_list
.upper
))
5192 if (master
&& netdev_master_upper_dev_get(dev
))
5195 ret
= __netdev_adjacent_dev_link_neighbour(dev
, upper_dev
, private,
	/* Now that we linked these devs, make all the upper_dev's
	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
	 * vice versa, and don't forget the devices themselves. All of these
	 * links are non-neighbours.
	 */
5205 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5206 list_for_each_entry(j
, &upper_dev
->all_adj_list
.upper
, list
) {
5207 pr_debug("Interlinking %s with %s, non-neighbour\n",
5208 i
->dev
->name
, j
->dev
->name
);
5209 ret
= __netdev_adjacent_dev_link(i
->dev
, j
->dev
);
5215 /* add dev to every upper_dev's upper device */
5216 list_for_each_entry(i
, &upper_dev
->all_adj_list
.upper
, list
) {
5217 pr_debug("linking %s's upper device %s with %s\n",
5218 upper_dev
->name
, i
->dev
->name
, dev
->name
);
5219 ret
= __netdev_adjacent_dev_link(dev
, i
->dev
);
5221 goto rollback_upper_mesh
;
5224 /* add upper_dev to every dev's lower device */
5225 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5226 pr_debug("linking %s's lower device %s with %s\n", dev
->name
,
5227 i
->dev
->name
, upper_dev
->name
);
5228 ret
= __netdev_adjacent_dev_link(i
->dev
, upper_dev
);
5230 goto rollback_lower_mesh
;
5233 call_netdevice_notifiers(NETDEV_CHANGEUPPER
, dev
);
5236 rollback_lower_mesh
:
5238 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5241 __netdev_adjacent_dev_unlink(i
->dev
, upper_dev
);
5246 rollback_upper_mesh
:
5248 list_for_each_entry(i
, &upper_dev
->all_adj_list
.upper
, list
) {
5251 __netdev_adjacent_dev_unlink(dev
, i
->dev
);
5259 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5260 list_for_each_entry(j
, &upper_dev
->all_adj_list
.upper
, list
) {
5261 if (i
== to_i
&& j
== to_j
)
5263 __netdev_adjacent_dev_unlink(i
->dev
, j
->dev
);
5269 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to a device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
5284 int netdev_upper_dev_link(struct net_device
*dev
,
5285 struct net_device
*upper_dev
)
5287 return __netdev_upper_dev_link(dev
, upper_dev
, false, NULL
);
5289 EXPORT_SYMBOL(netdev_upper_dev_link
);
5292 * netdev_master_upper_dev_link - Add a master link to the upper device
5294 * @upper_dev: new upper device
5296 * Adds a link to device which is upper to this one. In this case, only
5297 * one master upper device can be linked, although other non-master devices
5298 * might be linked as well. The caller must hold the RTNL lock.
5299 * On a failure a negative errno code is returned. On success the reference
5300 * counts are adjusted and the function returns zero.
5302 int netdev_master_upper_dev_link(struct net_device
*dev
,
5303 struct net_device
*upper_dev
)
5305 return __netdev_upper_dev_link(dev
, upper_dev
, true, NULL
);
5307 EXPORT_SYMBOL(netdev_master_upper_dev_link
);
5309 int netdev_master_upper_dev_link_private(struct net_device
*dev
,
5310 struct net_device
*upper_dev
,
5313 return __netdev_upper_dev_link(dev
, upper_dev
, true, private);
5315 EXPORT_SYMBOL(netdev_master_upper_dev_link_private
);
/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to a device which is upper to this one. The caller must
 * hold the RTNL lock.
 */
5325 void netdev_upper_dev_unlink(struct net_device
*dev
,
5326 struct net_device
*upper_dev
)
5328 struct netdev_adjacent
*i
, *j
;
5331 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
5333 /* Here is the tricky part. We must remove all dev's lower
5334 * devices from all upper_dev's upper devices and vice
5335 * versa, to maintain the graph relationship.
5337 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
)
5338 list_for_each_entry(j
, &upper_dev
->all_adj_list
.upper
, list
)
5339 __netdev_adjacent_dev_unlink(i
->dev
, j
->dev
);
	/* remove also the devices themselves from the lower/upper device
	 * lists
	 */
5344 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
)
5345 __netdev_adjacent_dev_unlink(i
->dev
, upper_dev
);
5347 list_for_each_entry(i
, &upper_dev
->all_adj_list
.upper
, list
)
5348 __netdev_adjacent_dev_unlink(dev
, i
->dev
);
5350 call_netdevice_notifiers(NETDEV_CHANGEUPPER
, dev
);
5352 EXPORT_SYMBOL(netdev_upper_dev_unlink
);
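/* Illustrative sketch: how an upper driver (bridge/bond style) might wire and
 * unwire one of its ports with the link helpers above, both under the RTNL
 * lock. The example_* helpers are hypothetical; a real driver would also
 * install an rx_handler and sync addresses, MTU and features.
 */
static int __maybe_unused example_attach_port(struct net_device *upper,
					      struct net_device *port)
{
	ASSERT_RTNL();

	/* Fails e.g. with -EBUSY if the port already has a master or the
	 * link would create a loop, and with -EEXIST if already linked.
	 */
	return netdev_master_upper_dev_link(port, upper);
}

static void __maybe_unused example_detach_port(struct net_device *upper,
					       struct net_device *port)
{
	ASSERT_RTNL();

	netdev_upper_dev_unlink(port, upper);
}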
5355 * netdev_bonding_info_change - Dispatch event about slave change
5357 * @bonding_info: info to dispatch
5359 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5360 * The caller must hold the RTNL lock.
5362 void netdev_bonding_info_change(struct net_device
*dev
,
5363 struct netdev_bonding_info
*bonding_info
)
5365 struct netdev_notifier_bonding_info info
;
5367 memcpy(&info
.bonding_info
, bonding_info
,
5368 sizeof(struct netdev_bonding_info
));
5369 call_netdevice_notifiers_info(NETDEV_BONDING_INFO
, dev
,
5372 EXPORT_SYMBOL(netdev_bonding_info_change
);
5374 static void netdev_adjacent_add_links(struct net_device
*dev
)
5376 struct netdev_adjacent
*iter
;
5378 struct net
*net
= dev_net(dev
);
5380 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
5381 if (!net_eq(net
,dev_net(iter
->dev
)))
5383 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5384 &iter
->dev
->adj_list
.lower
);
5385 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
5386 &dev
->adj_list
.upper
);
5389 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
5390 if (!net_eq(net
,dev_net(iter
->dev
)))
5392 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5393 &iter
->dev
->adj_list
.upper
);
5394 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
5395 &dev
->adj_list
.lower
);
5399 static void netdev_adjacent_del_links(struct net_device
*dev
)
5401 struct netdev_adjacent
*iter
;
5403 struct net
*net
= dev_net(dev
);
5405 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
5406 if (!net_eq(net
,dev_net(iter
->dev
)))
5408 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
5409 &iter
->dev
->adj_list
.lower
);
5410 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
5411 &dev
->adj_list
.upper
);
5414 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
5415 if (!net_eq(net
,dev_net(iter
->dev
)))
5417 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
5418 &iter
->dev
->adj_list
.upper
);
5419 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
5420 &dev
->adj_list
.lower
);
5424 void netdev_adjacent_rename_links(struct net_device
*dev
, char *oldname
)
5426 struct netdev_adjacent
*iter
;
5428 struct net
*net
= dev_net(dev
);
5430 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
5431 if (!net_eq(net
,dev_net(iter
->dev
)))
5433 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
5434 &iter
->dev
->adj_list
.lower
);
5435 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5436 &iter
->dev
->adj_list
.lower
);
5439 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
5440 if (!net_eq(net
,dev_net(iter
->dev
)))
5442 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
5443 &iter
->dev
->adj_list
.upper
);
5444 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5445 &iter
->dev
->adj_list
.upper
);
5449 void *netdev_lower_dev_get_private(struct net_device
*dev
,
5450 struct net_device
*lower_dev
)
5452 struct netdev_adjacent
*lower
;
5456 lower
= __netdev_find_adj(dev
, lower_dev
, &dev
->adj_list
.lower
);
5460 return lower
->private;
5462 EXPORT_SYMBOL(netdev_lower_dev_get_private
);
5465 int dev_get_nest_level(struct net_device
*dev
,
5466 bool (*type_check
)(struct net_device
*dev
))
5468 struct net_device
*lower
= NULL
;
5469 struct list_head
*iter
;
5475 netdev_for_each_lower_dev(dev
, lower
, iter
) {
5476 nest
= dev_get_nest_level(lower
, type_check
);
5477 if (max_nest
< nest
)
5481 if (type_check(dev
))
5486 EXPORT_SYMBOL(dev_get_nest_level
);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}
5496 static int __dev_set_promiscuity(struct net_device
*dev
, int inc
, bool notify
)
5498 unsigned int old_flags
= dev
->flags
;
5504 dev
->flags
|= IFF_PROMISC
;
5505 dev
->promiscuity
+= inc
;
5506 if (dev
->promiscuity
== 0) {
5509 * If inc causes overflow, untouch promisc and return error.
5512 dev
->flags
&= ~IFF_PROMISC
;
5514 dev
->promiscuity
-= inc
;
5515 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5520 if (dev
->flags
!= old_flags
) {
5521 pr_info("device %s %s promiscuous mode\n",
5523 dev
->flags
& IFF_PROMISC
? "entered" : "left");
5524 if (audit_enabled
) {
5525 current_uid_gid(&uid
, &gid
);
5526 audit_log(current
->audit_context
, GFP_ATOMIC
,
5527 AUDIT_ANOM_PROMISCUOUS
,
5528 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5529 dev
->name
, (dev
->flags
& IFF_PROMISC
),
5530 (old_flags
& IFF_PROMISC
),
5531 from_kuid(&init_user_ns
, audit_get_loginuid(current
)),
5532 from_kuid(&init_user_ns
, uid
),
5533 from_kgid(&init_user_ns
, gid
),
5534 audit_get_sessionid(current
));
5537 dev_change_rx_flags(dev
, IFF_PROMISC
);
5540 __dev_notify_flags(dev
, old_flags
, IFF_PROMISC
);
/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts to normal filtering operation. A negative @inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
5555 int dev_set_promiscuity(struct net_device
*dev
, int inc
)
5557 unsigned int old_flags
= dev
->flags
;
5560 err
= __dev_set_promiscuity(dev
, inc
, true);
5563 if (dev
->flags
!= old_flags
)
5564 dev_set_rx_mode(dev
);
5567 EXPORT_SYMBOL(dev_set_promiscuity
);
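/* Illustrative sketch: the usual reference-counted pattern for promiscuous
 * mode. Every +1 must eventually be balanced by a -1 or the interface stays
 * promiscuous. The example_* helpers are hypothetical.
 */
static int __maybe_unused example_capture_start(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);
}

static void __maybe_unused example_capture_stop(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);
}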
5569 static int __dev_set_allmulti(struct net_device
*dev
, int inc
, bool notify
)
5571 unsigned int old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
5575 dev
->flags
|= IFF_ALLMULTI
;
5576 dev
->allmulti
+= inc
;
5577 if (dev
->allmulti
== 0) {
5580 * If inc causes overflow, untouch allmulti and return error.
5583 dev
->flags
&= ~IFF_ALLMULTI
;
5585 dev
->allmulti
-= inc
;
5586 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5591 if (dev
->flags
^ old_flags
) {
5592 dev_change_rx_flags(dev
, IFF_ALLMULTI
);
5593 dev_set_rx_mode(dev
);
5595 __dev_notify_flags(dev
, old_flags
,
5596 dev
->gflags
^ old_gflags
);
/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface stays set to receive
 * all multicast frames. Once it hits zero the device reverts to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
5614 int dev_set_allmulti(struct net_device
*dev
, int inc
)
5616 return __dev_set_allmulti(dev
, inc
, true);
5618 EXPORT_SYMBOL(dev_set_allmulti
);
5621 * Upload unicast and multicast address lists to device and
5622 * configure RX filtering. When the device doesn't support unicast
5623 * filtering it is put in promiscuous mode while unicast addresses
5626 void __dev_set_rx_mode(struct net_device
*dev
)
5628 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5630 /* dev_open will call this function so the list will stay sane. */
5631 if (!(dev
->flags
&IFF_UP
))
5634 if (!netif_device_present(dev
))
5637 if (!(dev
->priv_flags
& IFF_UNICAST_FLT
)) {
		/* Unicast address changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity() here is safe.
		 */
5641 if (!netdev_uc_empty(dev
) && !dev
->uc_promisc
) {
5642 __dev_set_promiscuity(dev
, 1, false);
5643 dev
->uc_promisc
= true;
5644 } else if (netdev_uc_empty(dev
) && dev
->uc_promisc
) {
5645 __dev_set_promiscuity(dev
, -1, false);
5646 dev
->uc_promisc
= false;
5650 if (ops
->ndo_set_rx_mode
)
5651 ops
->ndo_set_rx_mode(dev
);
void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}
5662 * dev_get_flags - get flags reported to userspace
5665 * Get the combination of flag bits exported through APIs to userspace.
5667 unsigned int dev_get_flags(const struct net_device
*dev
)
5671 flags
= (dev
->flags
& ~(IFF_PROMISC
|
5676 (dev
->gflags
& (IFF_PROMISC
|
5679 if (netif_running(dev
)) {
5680 if (netif_oper_up(dev
))
5681 flags
|= IFF_RUNNING
;
5682 if (netif_carrier_ok(dev
))
5683 flags
|= IFF_LOWER_UP
;
5684 if (netif_dormant(dev
))
5685 flags
|= IFF_DORMANT
;
5690 EXPORT_SYMBOL(dev_get_flags
);
5692 int __dev_change_flags(struct net_device
*dev
, unsigned int flags
)
5694 unsigned int old_flags
= dev
->flags
;
5700 * Set the flags on our device.
5703 dev
->flags
= (flags
& (IFF_DEBUG
| IFF_NOTRAILERS
| IFF_NOARP
|
5704 IFF_DYNAMIC
| IFF_MULTICAST
| IFF_PORTSEL
|
5706 (dev
->flags
& (IFF_UP
| IFF_VOLATILE
| IFF_PROMISC
|
5710 * Load in the correct multicast list now the flags have changed.
5713 if ((old_flags
^ flags
) & IFF_MULTICAST
)
5714 dev_change_rx_flags(dev
, IFF_MULTICAST
);
5716 dev_set_rx_mode(dev
);
	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */
5725 if ((old_flags
^ flags
) & IFF_UP
)
5726 ret
= ((old_flags
& IFF_UP
) ? __dev_close
: __dev_open
)(dev
);
5728 if ((flags
^ dev
->gflags
) & IFF_PROMISC
) {
5729 int inc
= (flags
& IFF_PROMISC
) ? 1 : -1;
5730 unsigned int old_flags
= dev
->flags
;
5732 dev
->gflags
^= IFF_PROMISC
;
5734 if (__dev_set_promiscuity(dev
, inc
, false) >= 0)
5735 if (dev
->flags
!= old_flags
)
5736 dev_set_rx_mode(dev
);
	/* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without reporting.
	 */
5743 if ((flags
^ dev
->gflags
) & IFF_ALLMULTI
) {
5744 int inc
= (flags
& IFF_ALLMULTI
) ? 1 : -1;
5746 dev
->gflags
^= IFF_ALLMULTI
;
5747 __dev_set_allmulti(dev
, inc
, false);
5753 void __dev_notify_flags(struct net_device
*dev
, unsigned int old_flags
,
5754 unsigned int gchanges
)
5756 unsigned int changes
= dev
->flags
^ old_flags
;
5759 rtmsg_ifinfo(RTM_NEWLINK
, dev
, gchanges
, GFP_ATOMIC
);
5761 if (changes
& IFF_UP
) {
5762 if (dev
->flags
& IFF_UP
)
5763 call_netdevice_notifiers(NETDEV_UP
, dev
);
5765 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
5768 if (dev
->flags
& IFF_UP
&&
5769 (changes
& ~(IFF_UP
| IFF_PROMISC
| IFF_ALLMULTI
| IFF_VOLATILE
))) {
5770 struct netdev_notifier_change_info change_info
;
5772 change_info
.flags_changed
= changes
;
5773 call_netdevice_notifiers_info(NETDEV_CHANGE
, dev
,
5779 * dev_change_flags - change device settings
5781 * @flags: device state flags
5783 * Change settings on device based state flags. The flags are
5784 * in the userspace exported format.
5786 int dev_change_flags(struct net_device
*dev
, unsigned int flags
)
5789 unsigned int changes
, old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
5791 ret
= __dev_change_flags(dev
, flags
);
5795 changes
= (old_flags
^ dev
->flags
) | (old_gflags
^ dev
->gflags
);
5796 __dev_notify_flags(dev
, old_flags
, changes
);
5799 EXPORT_SYMBOL(dev_change_flags
);
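/* Illustrative sketch: bringing an interface administratively up from inside
 * the kernel by adding IFF_UP to the userspace-visible flags. The example_*
 * helper is hypothetical.
 */
static int __maybe_unused example_force_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	/* Keep the existing flags, only request IFF_UP */
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();

	return err;
}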
5801 static int __dev_set_mtu(struct net_device
*dev
, int new_mtu
)
5803 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5805 if (ops
->ndo_change_mtu
)
5806 return ops
->ndo_change_mtu(dev
, new_mtu
);
5813 * dev_set_mtu - Change maximum transfer unit
5815 * @new_mtu: new transfer unit
5817 * Change the maximum transfer size of the network device.
5819 int dev_set_mtu(struct net_device
*dev
, int new_mtu
)
5823 if (new_mtu
== dev
->mtu
)
5826 /* MTU must be positive. */
5830 if (!netif_device_present(dev
))
5833 err
= call_netdevice_notifiers(NETDEV_PRECHANGEMTU
, dev
);
5834 err
= notifier_to_errno(err
);
5838 orig_mtu
= dev
->mtu
;
5839 err
= __dev_set_mtu(dev
, new_mtu
);
5842 err
= call_netdevice_notifiers(NETDEV_CHANGEMTU
, dev
);
5843 err
= notifier_to_errno(err
);
5845 /* setting mtu back and notifying everyone again,
5846 * so that they have a chance to revert changes.
5848 __dev_set_mtu(dev
, orig_mtu
);
5849 call_netdevice_notifiers(NETDEV_CHANGEMTU
, dev
);
5854 EXPORT_SYMBOL(dev_set_mtu
);
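/* Illustrative sketch: shrinking the MTU of an underlying device to make room
 * for tunnel encapsulation overhead. The example_* helper and the overhead
 * parameter are hypothetical.
 */
static int __maybe_unused example_reserve_headroom_mtu(struct net_device *dev,
							int overhead)
{
	int err;

	rtnl_lock();
	/* dev_set_mtu() runs the PRECHANGEMTU/CHANGEMTU notifiers and rolls
	 * the value back if a notifier rejects the change.
	 */
	err = dev_set_mtu(dev, dev->mtu - overhead);
	rtnl_unlock();

	return err;
}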
/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);
/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device.
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
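/* Illustrative sketch: assigning a random unicast MAC address through
 * dev_set_mac_address(), so that notifiers and addr_assign_type are updated
 * consistently. The example_* helper is hypothetical.
 */
static int __maybe_unused example_set_random_mac(struct net_device *dev)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;	/* must match dev->type, see above */
	eth_random_addr((u8 *)sa.sa_data);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();

	return err;
}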
5896 * dev_change_carrier - Change device carrier
5898 * @new_carrier: new value
5900 * Change device carrier
5902 int dev_change_carrier(struct net_device
*dev
, bool new_carrier
)
5904 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5906 if (!ops
->ndo_change_carrier
)
5908 if (!netif_device_present(dev
))
5910 return ops
->ndo_change_carrier(dev
, new_carrier
);
5912 EXPORT_SYMBOL(dev_change_carrier
);
5915 * dev_get_phys_port_id - Get device physical port ID
5919 * Get device physical port ID
5921 int dev_get_phys_port_id(struct net_device
*dev
,
5922 struct netdev_phys_item_id
*ppid
)
5924 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5926 if (!ops
->ndo_get_phys_port_id
)
5928 return ops
->ndo_get_phys_port_id(dev
, ppid
);
5930 EXPORT_SYMBOL(dev_get_phys_port_id
);
5933 * dev_get_phys_port_name - Get device physical port name
5937 * Get device physical port name
5939 int dev_get_phys_port_name(struct net_device
*dev
,
5940 char *name
, size_t len
)
5942 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5944 if (!ops
->ndo_get_phys_port_name
)
5946 return ops
->ndo_get_phys_port_name(dev
, name
, len
);
5948 EXPORT_SYMBOL(dev_get_phys_port_name
);
5951 * dev_new_index - allocate an ifindex
5952 * @net: the applicable net namespace
5954 * Returns a suitable unique value for a new device interface
5955 * number. The caller must hold the rtnl semaphore or the
5956 * dev_base_lock to be sure it remains unique.
5958 static int dev_new_index(struct net
*net
)
5960 int ifindex
= net
->ifindex
;
5964 if (!__dev_get_by_index(net
, ifindex
))
5965 return net
->ifindex
= ifindex
;
5969 /* Delayed registration/unregisteration */
5970 static LIST_HEAD(net_todo_list
);
5971 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq
);
5973 static void net_set_todo(struct net_device
*dev
)
5975 list_add_tail(&dev
->todo_list
, &net_todo_list
);
5976 dev_net(dev
)->dev_unreg_count
++;
5979 static void rollback_registered_many(struct list_head
*head
)
5981 struct net_device
*dev
, *tmp
;
5982 LIST_HEAD(close_head
);
5984 BUG_ON(dev_boot_phase
);
5987 list_for_each_entry_safe(dev
, tmp
, head
, unreg_list
) {
5988 /* Some devices call without registering
5989 * for initialization unwind. Remove those
5990 * devices and proceed with the remaining.
5992 if (dev
->reg_state
== NETREG_UNINITIALIZED
) {
5993 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5997 list_del(&dev
->unreg_list
);
6000 dev
->dismantle
= true;
6001 BUG_ON(dev
->reg_state
!= NETREG_REGISTERED
);
6004 /* If device is running, close it first. */
6005 list_for_each_entry(dev
, head
, unreg_list
)
6006 list_add_tail(&dev
->close_list
, &close_head
);
6007 dev_close_many(&close_head
, true);
6009 list_for_each_entry(dev
, head
, unreg_list
) {
6010 /* And unlink it from device chain. */
6011 unlist_netdevice(dev
);
6013 dev
->reg_state
= NETREG_UNREGISTERING
;
6018 list_for_each_entry(dev
, head
, unreg_list
) {
6019 struct sk_buff
*skb
= NULL
;
6021 /* Shutdown queueing discipline. */
6025 /* Notify protocols, that we are about to destroy
6026 this device. They should clean all the things.
6028 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
6030 if (!dev
->rtnl_link_ops
||
6031 dev
->rtnl_link_state
== RTNL_LINK_INITIALIZED
)
6032 skb
= rtmsg_ifinfo_build_skb(RTM_DELLINK
, dev
, ~0U,
6036 * Flush the unicast and multicast chains
6041 if (dev
->netdev_ops
->ndo_uninit
)
6042 dev
->netdev_ops
->ndo_uninit(dev
);
6045 rtmsg_ifinfo_send(skb
, dev
, GFP_KERNEL
);
6047 /* Notifier chain MUST detach us all upper devices. */
6048 WARN_ON(netdev_has_any_upper_dev(dev
));
6050 /* Remove entries from kobject tree */
6051 netdev_unregister_kobject(dev
);
6053 /* Remove XPS queueing entries */
6054 netif_reset_xps_queues_gt(dev
, 0);
6060 list_for_each_entry(dev
, head
, unreg_list
)
6064 static void rollback_registered(struct net_device
*dev
)
6068 list_add(&dev
->unreg_list
, &single
);
6069 rollback_registered_many(&single
);
6073 static netdev_features_t
netdev_fix_features(struct net_device
*dev
,
6074 netdev_features_t features
)
6076 /* Fix illegal checksum combinations */
6077 if ((features
& NETIF_F_HW_CSUM
) &&
6078 (features
& (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))) {
6079 netdev_warn(dev
, "mixed HW and IP checksum settings.\n");
6080 features
&= ~(NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
);
6083 /* TSO requires that SG is present as well. */
6084 if ((features
& NETIF_F_ALL_TSO
) && !(features
& NETIF_F_SG
)) {
6085 netdev_dbg(dev
, "Dropping TSO features since no SG feature.\n");
6086 features
&= ~NETIF_F_ALL_TSO
;
6089 if ((features
& NETIF_F_TSO
) && !(features
& NETIF_F_HW_CSUM
) &&
6090 !(features
& NETIF_F_IP_CSUM
)) {
6091 netdev_dbg(dev
, "Dropping TSO features since no CSUM feature.\n");
6092 features
&= ~NETIF_F_TSO
;
6093 features
&= ~NETIF_F_TSO_ECN
;
6096 if ((features
& NETIF_F_TSO6
) && !(features
& NETIF_F_HW_CSUM
) &&
6097 !(features
& NETIF_F_IPV6_CSUM
)) {
6098 netdev_dbg(dev
, "Dropping TSO6 features since no CSUM feature.\n");
6099 features
&= ~NETIF_F_TSO6
;
6102 /* TSO ECN requires that TSO is present as well. */
6103 if ((features
& NETIF_F_ALL_TSO
) == NETIF_F_TSO_ECN
)
6104 features
&= ~NETIF_F_TSO_ECN
;
6106 /* Software GSO depends on SG. */
6107 if ((features
& NETIF_F_GSO
) && !(features
& NETIF_F_SG
)) {
6108 netdev_dbg(dev
, "Dropping NETIF_F_GSO since no SG feature.\n");
6109 features
&= ~NETIF_F_GSO
;
6112 /* UFO needs SG and checksumming */
6113 if (features
& NETIF_F_UFO
) {
6114 /* maybe split UFO into V4 and V6? */
6115 if (!((features
& NETIF_F_GEN_CSUM
) ||
6116 (features
& (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))
6117 == (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))) {
6119 "Dropping NETIF_F_UFO since no checksum offload features.\n");
6120 features
&= ~NETIF_F_UFO
;
6123 if (!(features
& NETIF_F_SG
)) {
6125 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6126 features
&= ~NETIF_F_UFO
;
6130 #ifdef CONFIG_NET_RX_BUSY_POLL
6131 if (dev
->netdev_ops
->ndo_busy_poll
)
6132 features
|= NETIF_F_BUSY_POLL
;
6135 features
&= ~NETIF_F_BUSY_POLL
;
6140 int __netdev_update_features(struct net_device
*dev
)
6142 netdev_features_t features
;
6147 features
= netdev_get_wanted_features(dev
);
6149 if (dev
->netdev_ops
->ndo_fix_features
)
6150 features
= dev
->netdev_ops
->ndo_fix_features(dev
, features
);
6152 /* driver might be less strict about feature dependencies */
6153 features
= netdev_fix_features(dev
, features
);
6155 if (dev
->features
== features
)
6158 netdev_dbg(dev
, "Features changed: %pNF -> %pNF\n",
6159 &dev
->features
, &features
);
6161 if (dev
->netdev_ops
->ndo_set_features
)
6162 err
= dev
->netdev_ops
->ndo_set_features(dev
, features
);
6164 if (unlikely(err
< 0)) {
6166 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6167 err
, &features
, &dev
->features
);
6172 dev
->features
= features
;
6178 * netdev_update_features - recalculate device features
6179 * @dev: the device to check
6181 * Recalculate dev->features set and send notifications if it
6182 * has changed. Should be called after driver or hardware dependent
6183 * conditions might have changed that influence the features.
6185 void netdev_update_features(struct net_device
*dev
)
6187 if (__netdev_update_features(dev
))
6188 netdev_features_change(dev
);
6190 EXPORT_SYMBOL(netdev_update_features
);
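/* Illustrative sketch, loosely modelled on the ethtool path: toggle a single
 * offload bit in wanted_features and let netdev_update_features() recompute
 * and announce the effective feature set. The example_* helper is
 * hypothetical.
 */
static void __maybe_unused example_toggle_rxcsum(struct net_device *dev,
						 bool enable)
{
	rtnl_lock();

	if (enable)
		dev->wanted_features |= NETIF_F_RXCSUM;
	else
		dev->wanted_features &= ~NETIF_F_RXCSUM;

	/* Re-runs ndo_fix_features()/netdev_fix_features() and sends a
	 * feature-change notification only if dev->features changed.
	 */
	netdev_update_features(dev);

	rtnl_unlock();
}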
6193 * netdev_change_features - recalculate device features
6194 * @dev: the device to check
6196 * Recalculate dev->features set and send notifications even
6197 * if they have not changed. Should be called instead of
6198 * netdev_update_features() if also dev->vlan_features might
6199 * have changed to allow the changes to be propagated to stacked
6202 void netdev_change_features(struct net_device
*dev
)
6204 __netdev_update_features(dev
);
6205 netdev_features_change(dev
);
6207 EXPORT_SYMBOL(netdev_change_features
);
6210 * netif_stacked_transfer_operstate - transfer operstate
6211 * @rootdev: the root or lower level device to transfer state from
6212 * @dev: the device to transfer operstate to
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
6218 void netif_stacked_transfer_operstate(const struct net_device
*rootdev
,
6219 struct net_device
*dev
)
6221 if (rootdev
->operstate
== IF_OPER_DORMANT
)
6222 netif_dormant_on(dev
);
6224 netif_dormant_off(dev
);
6226 if (netif_carrier_ok(rootdev
)) {
6227 if (!netif_carrier_ok(dev
))
6228 netif_carrier_on(dev
);
6230 if (netif_carrier_ok(dev
))
6231 netif_carrier_off(dev
);
6234 EXPORT_SYMBOL(netif_stacked_transfer_operstate
);
6237 static int netif_alloc_rx_queues(struct net_device
*dev
)
6239 unsigned int i
, count
= dev
->num_rx_queues
;
6240 struct netdev_rx_queue
*rx
;
6241 size_t sz
= count
* sizeof(*rx
);
6245 rx
= kzalloc(sz
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_REPEAT
);
6253 for (i
= 0; i
< count
; i
++)
6259 static void netdev_init_one_queue(struct net_device
*dev
,
6260 struct netdev_queue
*queue
, void *_unused
)
6262 /* Initialize queue lock */
6263 spin_lock_init(&queue
->_xmit_lock
);
6264 netdev_set_xmit_lockdep_class(&queue
->_xmit_lock
, dev
->type
);
6265 queue
->xmit_lock_owner
= -1;
6266 netdev_queue_numa_node_write(queue
, NUMA_NO_NODE
);
6269 dql_init(&queue
->dql
, HZ
);
6273 static void netif_free_tx_queues(struct net_device
*dev
)
6278 static int netif_alloc_netdev_queues(struct net_device
*dev
)
6280 unsigned int count
= dev
->num_tx_queues
;
6281 struct netdev_queue
*tx
;
6282 size_t sz
= count
* sizeof(*tx
);
6284 BUG_ON(count
< 1 || count
> 0xffff);
6286 tx
= kzalloc(sz
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_REPEAT
);
6294 netdev_for_each_tx_queue(dev
, netdev_init_one_queue
, NULL
);
6295 spin_lock_init(&dev
->tx_global_lock
);
6301 * register_netdevice - register a network device
6302 * @dev: device to register
6304 * Take a completed network device structure and add it to the kernel
6305 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6306 * chain. 0 is returned on success. A negative errno code is returned
6307 * on a failure to set up the device, or if the name is a duplicate.
6309 * Callers must hold the rtnl semaphore. You may want
6310 * register_netdev() instead of this.
6313 * The locking appears insufficient to guarantee two parallel registers
6314 * will not get the same name.
6317 int register_netdevice(struct net_device
*dev
)
6320 struct net
*net
= dev_net(dev
);
6322 BUG_ON(dev_boot_phase
);
6327 /* When net_device's are persistent, this will be fatal. */
6328 BUG_ON(dev
->reg_state
!= NETREG_UNINITIALIZED
);
6331 spin_lock_init(&dev
->addr_list_lock
);
6332 netdev_set_addr_lockdep_class(dev
);
6336 ret
= dev_get_valid_name(net
, dev
, dev
->name
);
6340 /* Init, if this function is available */
6341 if (dev
->netdev_ops
->ndo_init
) {
6342 ret
= dev
->netdev_ops
->ndo_init(dev
);
6350 if (((dev
->hw_features
| dev
->features
) &
6351 NETIF_F_HW_VLAN_CTAG_FILTER
) &&
6352 (!dev
->netdev_ops
->ndo_vlan_rx_add_vid
||
6353 !dev
->netdev_ops
->ndo_vlan_rx_kill_vid
)) {
6354 netdev_WARN(dev
, "Buggy VLAN acceleration in driver!\n");
6361 dev
->ifindex
= dev_new_index(net
);
6362 else if (__dev_get_by_index(net
, dev
->ifindex
))
6365 if (dev_get_iflink(dev
) == -1)
6366 dev
->iflink
= dev
->ifindex
;
6368 /* Transfer changeable features to wanted_features and enable
6369 * software offloads (GSO and GRO).
6371 dev
->hw_features
|= NETIF_F_SOFT_FEATURES
;
6372 dev
->features
|= NETIF_F_SOFT_FEATURES
;
6373 dev
->wanted_features
= dev
->features
& dev
->hw_features
;
6375 if (!(dev
->flags
& IFF_LOOPBACK
)) {
6376 dev
->hw_features
|= NETIF_F_NOCACHE_COPY
;
6379 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6381 dev
->vlan_features
|= NETIF_F_HIGHDMA
;
6383 /* Make NETIF_F_SG inheritable to tunnel devices.
6385 dev
->hw_enc_features
|= NETIF_F_SG
;
6387 /* Make NETIF_F_SG inheritable to MPLS.
6389 dev
->mpls_features
|= NETIF_F_SG
;
6391 ret
= call_netdevice_notifiers(NETDEV_POST_INIT
, dev
);
6392 ret
= notifier_to_errno(ret
);
6396 ret
= netdev_register_kobject(dev
);
6399 dev
->reg_state
= NETREG_REGISTERED
;
6401 __netdev_update_features(dev
);
6404 * Default initial state at registry is that the
6405 * device is present.
6408 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
6410 linkwatch_init_dev(dev
);
6412 dev_init_scheduler(dev
);
6414 list_netdevice(dev
);
6415 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
6417 /* If the device has permanent device address, driver should
6418 * set dev_addr and also addr_assign_type should be set to
6419 * NET_ADDR_PERM (default value).
6421 if (dev
->addr_assign_type
== NET_ADDR_PERM
)
6422 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
6424 /* Notify protocols, that a new device appeared. */
6425 ret
= call_netdevice_notifiers(NETDEV_REGISTER
, dev
);
6426 ret
= notifier_to_errno(ret
);
6428 rollback_registered(dev
);
6429 dev
->reg_state
= NETREG_UNREGISTERED
;
6432 * Prevent userspace races by waiting until the network
6433 * device is fully setup before sending notifications.
6435 if (!dev
->rtnl_link_ops
||
6436 dev
->rtnl_link_state
== RTNL_LINK_INITIALIZED
)
6437 rtmsg_ifinfo(RTM_NEWLINK
, dev
, ~0U, GFP_KERNEL
);
6443 if (dev
->netdev_ops
->ndo_uninit
)
6444 dev
->netdev_ops
->ndo_uninit(dev
);
6447 EXPORT_SYMBOL(register_netdevice
);
6450 * init_dummy_netdev - init a dummy network device for NAPI
6451 * @dev: device to init
6453 * This takes a network device structure and initialize the minimum
6454 * amount of fields so it can be used to schedule NAPI polls without
6455 * registering a full blown interface. This is to be used by drivers
6456 * that need to tie several hardware interfaces to a single NAPI
6457 * poll scheduler due to HW limitations.
6459 int init_dummy_netdev(struct net_device
*dev
)
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * used only for NAPI polls.
	 */
6466 memset(dev
, 0, sizeof(struct net_device
));
6468 /* make sure we BUG if trying to hit standard
6469 * register/unregister code path
6471 dev
->reg_state
= NETREG_DUMMY
;
6473 /* NAPI wants this */
6474 INIT_LIST_HEAD(&dev
->napi_list
);
6476 /* a dummy interface is started by default */
6477 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
6478 set_bit(__LINK_STATE_START
, &dev
->state
);
6480 /* Note : We dont allocate pcpu_refcnt for dummy devices,
6481 * because users of this 'device' dont need to change
6487 EXPORT_SYMBOL_GPL(init_dummy_netdev
);
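/* Illustrative sketch: a driver that funnels several hardware channels into
 * one NAPI context can hang its napi_struct off a dummy netdev that is never
 * registered. struct example_napi_priv, example_poll() and the weight of 64
 * are hypothetical.
 */
struct example_napi_priv {
	struct net_device napi_dev;	/* dummy device, never registered */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to @budget packets, counting them in work_done ... */

	if (work_done < budget)
		napi_complete(napi);
	return work_done;
}

static void __maybe_unused example_napi_setup(struct example_napi_priv *priv)
{
	init_dummy_netdev(&priv->napi_dev);
	netif_napi_add(&priv->napi_dev, &priv->napi, example_poll, 64);
	napi_enable(&priv->napi);
}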
6491 * register_netdev - register a network device
6492 * @dev: device to register
6494 * Take a completed network device structure and add it to the kernel
6495 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6496 * chain. 0 is returned on success. A negative errno code is returned
6497 * on a failure to set up the device, or if the name is a duplicate.
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
6503 int register_netdev(struct net_device
*dev
)
6508 err
= register_netdevice(dev
);
6512 EXPORT_SYMBOL(register_netdev
);
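/* Illustrative sketch of the usual registration/teardown pairing around
 * register_netdev(). alloc_etherdev() is the common front end to
 * alloc_netdev_mqs() for Ethernet-style devices; the example_* helper is
 * hypothetical.
 */
static struct net_device * __maybe_unused example_create_ethdev(void)
{
	struct net_device *dev;

	dev = alloc_etherdev(0);	/* 0: no driver private area */
	if (!dev)
		return NULL;

	if (register_netdev(dev)) {	/* takes and drops the rtnl lock */
		free_netdev(dev);
		return NULL;
	}

	/* ... later torn down with unregister_netdev() + free_netdev() ... */
	return dev;
}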
6514 int netdev_refcnt_read(const struct net_device
*dev
)
6518 for_each_possible_cpu(i
)
6519 refcnt
+= *per_cpu_ptr(dev
->pcpu_refcnt
, i
);
6522 EXPORT_SYMBOL(netdev_refcnt_read
);
6525 * netdev_wait_allrefs - wait until all references are gone.
6526 * @dev: target net_device
6528 * This is called when unregistering network devices.
6530 * Any protocol or device that holds a reference should register
6531 * for netdevice notification, and cleanup and put back the
6532 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put().
 */
6536 static void netdev_wait_allrefs(struct net_device
*dev
)
6538 unsigned long rebroadcast_time
, warning_time
;
6541 linkwatch_forget_dev(dev
);
6543 rebroadcast_time
= warning_time
= jiffies
;
6544 refcnt
= netdev_refcnt_read(dev
);
6546 while (refcnt
!= 0) {
6547 if (time_after(jiffies
, rebroadcast_time
+ 1 * HZ
)) {
6550 /* Rebroadcast unregister notification */
6551 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
6557 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL
, dev
);
6558 if (test_bit(__LINK_STATE_LINKWATCH_PENDING
,
6560 /* We must not have linkwatch events
6561 * pending on unregister. If this
6562 * happens, we simply run the queue
6563 * unscheduled, resulting in a noop
6566 linkwatch_run_queue();
6571 rebroadcast_time
= jiffies
;
6576 refcnt
= netdev_refcnt_read(dev
);
6578 if (time_after(jiffies
, warning_time
+ 10 * HZ
)) {
6579 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6581 warning_time
= jiffies
;
6590 * register_netdevice(x1);
6591 * register_netdevice(x2);
6593 * unregister_netdevice(y1);
6594 * unregister_netdevice(y2);
6600 * We are invoked by rtnl_unlock().
6601 * This allows us to deal with problems:
6602 * 1) We can delete sysfs objects which invoke hotplug
6603 * without deadlocking with linkwatch via keventd.
6604 * 2) Since we run with the RTNL semaphore not held, we can sleep
6605 * safely in order to wait for the netdev refcnt to drop to zero.
6607 * We must not return until all unregister events added during
6608 * the interval the lock was held have been completed.
6610 void netdev_run_todo(void)
6612 struct list_head list
;
6614 /* Snapshot list, allow later requests */
6615 list_replace_init(&net_todo_list
, &list
);
6620 /* Wait for rcu callbacks to finish before next phase */
6621 if (!list_empty(&list
))
6624 while (!list_empty(&list
)) {
6625 struct net_device
*dev
6626 = list_first_entry(&list
, struct net_device
, todo_list
);
6627 list_del(&dev
->todo_list
);
6630 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL
, dev
);
6633 if (unlikely(dev
->reg_state
!= NETREG_UNREGISTERING
)) {
6634 pr_err("network todo '%s' but state %d\n",
6635 dev
->name
, dev
->reg_state
);
6640 dev
->reg_state
= NETREG_UNREGISTERED
;
6642 on_each_cpu(flush_backlog
, dev
, 1);
6644 netdev_wait_allrefs(dev
);
6647 BUG_ON(netdev_refcnt_read(dev
));
6648 BUG_ON(!list_empty(&dev
->ptype_all
));
6649 BUG_ON(!list_empty(&dev
->ptype_specific
));
6650 WARN_ON(rcu_access_pointer(dev
->ip_ptr
));
6651 WARN_ON(rcu_access_pointer(dev
->ip6_ptr
));
6652 WARN_ON(dev
->dn_ptr
);
6654 if (dev
->destructor
)
6655 dev
->destructor(dev
);
6657 /* Report a network device has been unregistered */
6659 dev_net(dev
)->dev_unreg_count
--;
6661 wake_up(&netdev_unregistering_wq
);
6663 /* Free network device */
6664 kobject_put(&dev
->dev
.kobj
);
6668 /* Convert net_device_stats to rtnl_link_stats64. They have the same
6669 * fields in the same order, with only the type differing.
6671 void netdev_stats_to_stats64(struct rtnl_link_stats64
*stats64
,
6672 const struct net_device_stats
*netdev_stats
)
6674 #if BITS_PER_LONG == 64
6675 BUILD_BUG_ON(sizeof(*stats64
) != sizeof(*netdev_stats
));
6676 memcpy(stats64
, netdev_stats
, sizeof(*stats64
));
6678 size_t i
, n
= sizeof(*stats64
) / sizeof(u64
);
6679 const unsigned long *src
= (const unsigned long *)netdev_stats
;
6680 u64
*dst
= (u64
*)stats64
;
6682 BUILD_BUG_ON(sizeof(*netdev_stats
) / sizeof(unsigned long) !=
6683 sizeof(*stats64
) / sizeof(u64
));
6684 for (i
= 0; i
< n
; i
++)
6688 EXPORT_SYMBOL(netdev_stats_to_stats64
);
6691 * dev_get_stats - get network device statistics
6692 * @dev: device to get statistics from
6693 * @storage: place to store stats
6695 * Get network statistics from device. Return @storage.
6696 * The device driver may provide its own method by setting
6697 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6698 * otherwise the internal statistics structure is used.
6700 struct rtnl_link_stats64
*dev_get_stats(struct net_device
*dev
,
6701 struct rtnl_link_stats64
*storage
)
6703 const struct net_device_ops
*ops
= dev
->netdev_ops
;
6705 if (ops
->ndo_get_stats64
) {
6706 memset(storage
, 0, sizeof(*storage
));
6707 ops
->ndo_get_stats64(dev
, storage
);
6708 } else if (ops
->ndo_get_stats
) {
6709 netdev_stats_to_stats64(storage
, ops
->ndo_get_stats(dev
));
6711 netdev_stats_to_stats64(storage
, &dev
->stats
);
6713 storage
->rx_dropped
+= atomic_long_read(&dev
->rx_dropped
);
6714 storage
->tx_dropped
+= atomic_long_read(&dev
->tx_dropped
);
6717 EXPORT_SYMBOL(dev_get_stats
);
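/* Illustrative sketch: reading a consistent 64-bit snapshot of a device's
 * statistics via dev_get_stats(). The example_* helper is hypothetical.
 */
static void __maybe_unused example_log_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	netdev_info(dev, "rx_packets=%llu rx_dropped=%llu tx_dropped=%llu\n",
		    stats.rx_packets, stats.rx_dropped, stats.tx_dropped);
}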
6719 struct netdev_queue
*dev_ingress_queue_create(struct net_device
*dev
)
6721 struct netdev_queue
*queue
= dev_ingress_queue(dev
);
6723 #ifdef CONFIG_NET_CLS_ACT
6726 queue
= kzalloc(sizeof(*queue
), GFP_KERNEL
);
6729 netdev_init_one_queue(dev
, queue
, NULL
);
6730 RCU_INIT_POINTER(queue
->qdisc
, &noop_qdisc
);
6731 queue
->qdisc_sleeping
= &noop_qdisc
;
6732 rcu_assign_pointer(dev
->ingress_queue
, queue
);
6737 static const struct ethtool_ops default_ethtool_ops
;
6739 void netdev_set_default_ethtool_ops(struct net_device
*dev
,
6740 const struct ethtool_ops
*ops
)
6742 if (dev
->ethtool_ops
== &default_ethtool_ops
)
6743 dev
->ethtool_ops
= ops
;
6745 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops
);
6747 void netdev_freemem(struct net_device
*dev
)
6749 char *addr
= (char *)dev
- dev
->padded
;
6755 * alloc_netdev_mqs - allocate network device
6756 * @sizeof_priv: size of private data to allocate space for
6757 * @name: device name format string
6758 * @name_assign_type: origin of device name
6759 * @setup: callback to initialize device
6760 * @txqs: the number of TX subqueues to allocate
6761 * @rxqs: the number of RX subqueues to allocate
6763 * Allocates a struct net_device with private data area for driver use
6764 * and performs basic initialization. Also allocates subqueue structs
6765 * for each queue on the device.
6767 struct net_device
*alloc_netdev_mqs(int sizeof_priv
, const char *name
,
6768 unsigned char name_assign_type
,
6769 void (*setup
)(struct net_device
*),
6770 unsigned int txqs
, unsigned int rxqs
)
6772 struct net_device
*dev
;
6774 struct net_device
*p
;
6776 BUG_ON(strlen(name
) >= sizeof(dev
->name
));
6779 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6785 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6790 alloc_size
= sizeof(struct net_device
);
6792 /* ensure 32-byte alignment of private area */
6793 alloc_size
= ALIGN(alloc_size
, NETDEV_ALIGN
);
6794 alloc_size
+= sizeof_priv
;
6796 /* ensure 32-byte alignment of whole construct */
6797 alloc_size
+= NETDEV_ALIGN
- 1;
6799 p
= kzalloc(alloc_size
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_REPEAT
);
6801 p
= vzalloc(alloc_size
);
6805 dev
= PTR_ALIGN(p
, NETDEV_ALIGN
);
6806 dev
->padded
= (char *)dev
- (char *)p
;
6808 dev
->pcpu_refcnt
= alloc_percpu(int);
6809 if (!dev
->pcpu_refcnt
)
6812 if (dev_addr_init(dev
))
6818 dev_net_set(dev
, &init_net
);
6820 dev
->gso_max_size
= GSO_MAX_SIZE
;
6821 dev
->gso_max_segs
= GSO_MAX_SEGS
;
6822 dev
->gso_min_segs
= 0;
6824 INIT_LIST_HEAD(&dev
->napi_list
);
6825 INIT_LIST_HEAD(&dev
->unreg_list
);
6826 INIT_LIST_HEAD(&dev
->close_list
);
6827 INIT_LIST_HEAD(&dev
->link_watch_list
);
6828 INIT_LIST_HEAD(&dev
->adj_list
.upper
);
6829 INIT_LIST_HEAD(&dev
->adj_list
.lower
);
6830 INIT_LIST_HEAD(&dev
->all_adj_list
.upper
);
6831 INIT_LIST_HEAD(&dev
->all_adj_list
.lower
);
6832 INIT_LIST_HEAD(&dev
->ptype_all
);
6833 INIT_LIST_HEAD(&dev
->ptype_specific
);
6834 dev
->priv_flags
= IFF_XMIT_DST_RELEASE
| IFF_XMIT_DST_RELEASE_PERM
;
6837 dev
->num_tx_queues
= txqs
;
6838 dev
->real_num_tx_queues
= txqs
;
6839 if (netif_alloc_netdev_queues(dev
))
6843 dev
->num_rx_queues
= rxqs
;
6844 dev
->real_num_rx_queues
= rxqs
;
6845 if (netif_alloc_rx_queues(dev
))
6849 strcpy(dev
->name
, name
);
6850 dev
->name_assign_type
= name_assign_type
;
6851 dev
->group
= INIT_NETDEV_GROUP
;
6852 if (!dev
->ethtool_ops
)
6853 dev
->ethtool_ops
= &default_ethtool_ops
;
6861 free_percpu(dev
->pcpu_refcnt
);
6863 netdev_freemem(dev
);
6866 EXPORT_SYMBOL(alloc_netdev_mqs
);
6869 * free_netdev - free network device
6872 * This function does the last stage of destroying an allocated device
6873 * interface. The reference to the device object is released.
6874 * If this is the last reference then it will be freed.
6876 void free_netdev(struct net_device
*dev
)
6878 struct napi_struct
*p
, *n
;
6880 netif_free_tx_queues(dev
);
6885 kfree(rcu_dereference_protected(dev
->ingress_queue
, 1));
6887 /* Flush device addresses */
6888 dev_addr_flush(dev
);
6890 list_for_each_entry_safe(p
, n
, &dev
->napi_list
, dev_list
)
6893 free_percpu(dev
->pcpu_refcnt
);
6894 dev
->pcpu_refcnt
= NULL
;
6896 /* Compatibility with error handling in drivers */
6897 if (dev
->reg_state
== NETREG_UNINITIALIZED
) {
6898 netdev_freemem(dev
);
6902 BUG_ON(dev
->reg_state
!= NETREG_UNREGISTERED
);
6903 dev
->reg_state
= NETREG_RELEASED
;
6905 /* will free via device release */
6906 put_device(&dev
->dev
);
6908 EXPORT_SYMBOL(free_netdev
);
6911 * synchronize_net - Synchronize with packet receive processing
6913 * Wait for packets currently being received to be done.
6914 * Does not block later packets from starting.
6916 void synchronize_net(void)
6919 if (rtnl_is_locked())
6920 synchronize_rcu_expedited();
6924 EXPORT_SYMBOL(synchronize_net
);
6927 * unregister_netdevice_queue - remove device from the kernel
6931 * This function shuts down a device interface and removes it
6932 * from the kernel tables.
6933 * If head not NULL, device is queued to be unregistered later.
6935 * Callers must hold the rtnl semaphore. You may want
6936 * unregister_netdev() instead of this.
6939 void unregister_netdevice_queue(struct net_device
*dev
, struct list_head
*head
)
6944 list_move_tail(&dev
->unreg_list
, head
);
6946 rollback_registered(dev
);
6947 /* Finish processing unregister after unlock */
6951 EXPORT_SYMBOL(unregister_netdevice_queue
);
6954 * unregister_netdevice_many - unregister many devices
6955 * @head: list of devices
6957 * Note: As most callers use a stack allocated list_head,
6958 * we force a list_del() to make sure stack wont be corrupted later.
6960 void unregister_netdevice_many(struct list_head
*head
)
6962 struct net_device
*dev
;
6964 if (!list_empty(head
)) {
6965 rollback_registered_many(head
);
6966 list_for_each_entry(dev
, head
, unreg_list
)
6971 EXPORT_SYMBOL(unregister_netdevice_many
);
6974 * unregister_netdev - remove device from the kernel
6977 * This function shuts down a device interface and removes it
6978 * from the kernel tables.
6980 * This is just a wrapper for unregister_netdevice that takes
6981 * the rtnl semaphore. In general you want to use this and not
6982 * unregister_netdevice.
6984 void unregister_netdev(struct net_device
*dev
)
6987 unregister_netdevice(dev
);
6990 EXPORT_SYMBOL(unregister_netdev
);
/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
7006 int dev_change_net_namespace(struct net_device
*dev
, struct net
*net
, const char *pat
)
7012 /* Don't allow namespace local devices to be moved. */
7014 if (dev
->features
& NETIF_F_NETNS_LOCAL
)
	/* Ensure the device has been registered */
7018 if (dev
->reg_state
!= NETREG_REGISTERED
)
	/* Get out if there is nothing to do */
7023 if (net_eq(dev_net(dev
), net
))
7026 /* Pick the destination device name, and ensure
7027 * we can use it in the destination network namespace.
7030 if (__dev_get_by_name(net
, dev
->name
)) {
7031 /* We get here if we can't use the current device name */
7034 if (dev_get_valid_name(net
, dev
, pat
) < 0)
	/* And now a mini version of register_netdevice and unregister_netdevice. */
7042 /* If device is running close it first. */
7045 /* And unlink it from device chain */
7047 unlist_netdevice(dev
);
7051 /* Shutdown queueing discipline. */
7054 /* Notify protocols, that we are about to destroy
7055 this device. They should clean all the things.
7057 Note that dev->reg_state stays at NETREG_REGISTERED.
7058 This is wanted because this way 8021q and macvlan know
7059 the device is just moving and can keep their slaves up.
7061 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
7063 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL
, dev
);
7064 rtmsg_ifinfo(RTM_DELLINK
, dev
, ~0U, GFP_KERNEL
);
7067 * Flush the unicast and multicast chains
7072 /* Send a netdev-removed uevent to the old namespace */
7073 kobject_uevent(&dev
->dev
.kobj
, KOBJ_REMOVE
);
7074 netdev_adjacent_del_links(dev
);
7076 /* Actually switch the network namespace */
7077 dev_net_set(dev
, net
);
7079 /* If there is an ifindex conflict assign a new one */
7080 if (__dev_get_by_index(net
, dev
->ifindex
)) {
7081 int iflink
= (dev_get_iflink(dev
) == dev
->ifindex
);
7082 dev
->ifindex
= dev_new_index(net
);
7084 dev
->iflink
= dev
->ifindex
;
7087 /* Send a netdev-add uevent to the new namespace */
7088 kobject_uevent(&dev
->dev
.kobj
, KOBJ_ADD
);
7089 netdev_adjacent_add_links(dev
);
7091 /* Fixup kobjects */
7092 err
= device_rename(&dev
->dev
, dev
->name
);
7095 /* Add the device back in the hashes */
7096 list_netdevice(dev
);
7098 /* Notify protocols, that a new device appeared. */
7099 call_netdevice_notifiers(NETDEV_REGISTER
, dev
);
7102 * Prevent userspace races by waiting until the network
7103 * device is fully setup before sending notifications.
7105 rtmsg_ifinfo(RTM_NEWLINK
, dev
, ~0U, GFP_KERNEL
);
7112 EXPORT_SYMBOL_GPL(dev_change_net_namespace
);
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);
	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;
	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();
	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
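
/*
 * Illustrative sketch (not part of the original file): how a master driver
 * such as bonding or a bridge might fold the feature sets of its slaves,
 * starting from its own features as the mask and tightening per slave.
 * example_recompute_features() and struct example_slave are hypothetical;
 * only netdev_increment_features() is the real helper.
 */
#if 0	/* example only */
struct example_slave {
	struct list_head list;
	struct net_device *dev;
};

static void example_recompute_features(struct net_device *master,
				       struct list_head *slaves)
{
	netdev_features_t mask = master->features;
	netdev_features_t features = mask;
	struct example_slave *s;

	/* Fold each slave's features into the running set, never enabling
	 * anything that the master's own mask does not allow.
	 */
	list_for_each_entry(s, slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features, mask);

	master->features = features;
}
#endif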
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
	va_start(args, fmt);					\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
	__netdev_printk(level, dev, &vaf);			\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);
define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
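
/*
 * Illustrative sketch (not part of the original file): drivers use the
 * per-level helpers generated above instead of raw printk(), so every message
 * is prefixed with the driver and interface name. example_link_change() and
 * its messages are hypothetical.
 */
#if 0	/* example only */
static void example_link_change(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link is up\n");
	else
		netdev_err(dev, "link lost, resetting hardware\n");
}
#endif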
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}
static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
/*
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;
	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;
	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}
	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free the
	 * loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices, so it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);

	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);