2 * NET3 Protocol independent device support routines.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Derived from the non IP parts of dev.c 1.0.19
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
51 * Rudi Cilibrasi : Pass the right thing to
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/stat.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/module.h>
108 #include <linux/netpoll.h>
109 #include <linux/rcupdate.h>
110 #include <linux/delay.h>
111 #include <net/iw_handler.h>
112 #include <asm/current.h>
113 #include <linux/audit.h>
114 #include <linux/dmaengine.h>
115 #include <linux/err.h>
116 #include <linux/ctype.h>
117 #include <linux/if_arp.h>
118 #include <linux/if_vlan.h>
119 #include <linux/ip.h>
121 #include <net/mpls.h>
122 #include <linux/ipv6.h>
123 #include <linux/in.h>
124 #include <linux/jhash.h>
125 #include <linux/random.h>
126 #include <trace/events/napi.h>
127 #include <trace/events/net.h>
128 #include <trace/events/skb.h>
129 #include <linux/pci.h>
130 #include <linux/inetdevice.h>
131 #include <linux/cpu_rmap.h>
132 #include <linux/static_key.h>
133 #include <linux/hashtable.h>
134 #include <linux/vmalloc.h>
135 #include <linux/if_macvlan.h>
136 #include <linux/errqueue.h>
137 #include <linux/hrtimer.h>
139 #include "net-sysfs.h"
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
147 static DEFINE_SPINLOCK(ptype_lock
);
148 static DEFINE_SPINLOCK(offload_lock
);
149 struct list_head ptype_base
[PTYPE_HASH_SIZE
] __read_mostly
;
150 struct list_head ptype_all __read_mostly
; /* Taps */
151 static struct list_head offload_base __read_mostly
;
153 static int netif_rx_internal(struct sk_buff
*skb
);
154 static int call_netdevice_notifiers_info(unsigned long val
,
155 struct net_device
*dev
,
156 struct netdev_notifier_info
*info
);
159 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
162 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
164 * Writers must hold the rtnl semaphore while they loop through the
165 * dev_base_head list, and hold dev_base_lock for writing when they do the
166 * actual updates. This allows pure readers to access the list even
167 * while a writer is preparing to update it.
169 * To put it another way, dev_base_lock is held for writing only to
170 * protect against pure readers; the rtnl semaphore provides the
171 * protection against other writers.
173 * See, for example usages, register_netdevice() and
174 * unregister_netdevice(), which must be called with the rtnl
177 DEFINE_RWLOCK(dev_base_lock
);
178 EXPORT_SYMBOL(dev_base_lock
);
180 /* protects napi_hash addition/deletion and napi_gen_id */
181 static DEFINE_SPINLOCK(napi_hash_lock
);
183 static unsigned int napi_gen_id
;
184 static DEFINE_HASHTABLE(napi_hash
, 8);
186 static seqcount_t devnet_rename_seq
;
188 static inline void dev_base_seq_inc(struct net
*net
)
190 while (++net
->dev_base_seq
== 0);
193 static inline struct hlist_head
*dev_name_hash(struct net
*net
, const char *name
)
195 unsigned int hash
= full_name_hash(name
, strnlen(name
, IFNAMSIZ
));
197 return &net
->dev_name_head
[hash_32(hash
, NETDEV_HASHBITS
)];
200 static inline struct hlist_head
*dev_index_hash(struct net
*net
, int ifindex
)
202 return &net
->dev_index_head
[ifindex
& (NETDEV_HASHENTRIES
- 1)];
205 static inline void rps_lock(struct softnet_data
*sd
)
208 spin_lock(&sd
->input_pkt_queue
.lock
);
212 static inline void rps_unlock(struct softnet_data
*sd
)
215 spin_unlock(&sd
->input_pkt_queue
.lock
);
219 /* Device list insertion */
220 static void list_netdevice(struct net_device
*dev
)
222 struct net
*net
= dev_net(dev
);
226 write_lock_bh(&dev_base_lock
);
227 list_add_tail_rcu(&dev
->dev_list
, &net
->dev_base_head
);
228 hlist_add_head_rcu(&dev
->name_hlist
, dev_name_hash(net
, dev
->name
));
229 hlist_add_head_rcu(&dev
->index_hlist
,
230 dev_index_hash(net
, dev
->ifindex
));
231 write_unlock_bh(&dev_base_lock
);
233 dev_base_seq_inc(net
);
236 /* Device list removal
237 * caller must respect a RCU grace period before freeing/reusing dev
239 static void unlist_netdevice(struct net_device
*dev
)
243 /* Unlink dev from the device chain */
244 write_lock_bh(&dev_base_lock
);
245 list_del_rcu(&dev
->dev_list
);
246 hlist_del_rcu(&dev
->name_hlist
);
247 hlist_del_rcu(&dev
->index_hlist
);
248 write_unlock_bh(&dev_base_lock
);
250 dev_base_seq_inc(dev_net(dev
));
257 static RAW_NOTIFIER_HEAD(netdev_chain
);
260 * Device drivers call our routines to queue packets here. We empty the
261 * queue in the local softnet handler.
264 DEFINE_PER_CPU_ALIGNED(struct softnet_data
, softnet_data
);
265 EXPORT_PER_CPU_SYMBOL(softnet_data
);
267 #ifdef CONFIG_LOCKDEP
269 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
270 * according to dev->type
272 static const unsigned short netdev_lock_type
[] =
273 {ARPHRD_NETROM
, ARPHRD_ETHER
, ARPHRD_EETHER
, ARPHRD_AX25
,
274 ARPHRD_PRONET
, ARPHRD_CHAOS
, ARPHRD_IEEE802
, ARPHRD_ARCNET
,
275 ARPHRD_APPLETLK
, ARPHRD_DLCI
, ARPHRD_ATM
, ARPHRD_METRICOM
,
276 ARPHRD_IEEE1394
, ARPHRD_EUI64
, ARPHRD_INFINIBAND
, ARPHRD_SLIP
,
277 ARPHRD_CSLIP
, ARPHRD_SLIP6
, ARPHRD_CSLIP6
, ARPHRD_RSRVD
,
278 ARPHRD_ADAPT
, ARPHRD_ROSE
, ARPHRD_X25
, ARPHRD_HWX25
,
279 ARPHRD_PPP
, ARPHRD_CISCO
, ARPHRD_LAPB
, ARPHRD_DDCMP
,
280 ARPHRD_RAWHDLC
, ARPHRD_TUNNEL
, ARPHRD_TUNNEL6
, ARPHRD_FRAD
,
281 ARPHRD_SKIP
, ARPHRD_LOOPBACK
, ARPHRD_LOCALTLK
, ARPHRD_FDDI
,
282 ARPHRD_BIF
, ARPHRD_SIT
, ARPHRD_IPDDP
, ARPHRD_IPGRE
,
283 ARPHRD_PIMREG
, ARPHRD_HIPPI
, ARPHRD_ASH
, ARPHRD_ECONET
,
284 ARPHRD_IRDA
, ARPHRD_FCPP
, ARPHRD_FCAL
, ARPHRD_FCPL
,
285 ARPHRD_FCFABRIC
, ARPHRD_IEEE80211
, ARPHRD_IEEE80211_PRISM
,
286 ARPHRD_IEEE80211_RADIOTAP
, ARPHRD_PHONET
, ARPHRD_PHONET_PIPE
,
287 ARPHRD_IEEE802154
, ARPHRD_VOID
, ARPHRD_NONE
};
289 static const char *const netdev_lock_name
[] =
290 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
291 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
292 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
293 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
294 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
295 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
296 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
297 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
298 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
299 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
300 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
301 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
302 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
303 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
304 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
306 static struct lock_class_key netdev_xmit_lock_key
[ARRAY_SIZE(netdev_lock_type
)];
307 static struct lock_class_key netdev_addr_lock_key
[ARRAY_SIZE(netdev_lock_type
)];
309 static inline unsigned short netdev_lock_pos(unsigned short dev_type
)
313 for (i
= 0; i
< ARRAY_SIZE(netdev_lock_type
); i
++)
314 if (netdev_lock_type
[i
] == dev_type
)
316 /* the last key is used by default */
317 return ARRAY_SIZE(netdev_lock_type
) - 1;
320 static inline void netdev_set_xmit_lockdep_class(spinlock_t
*lock
,
321 unsigned short dev_type
)
325 i
= netdev_lock_pos(dev_type
);
326 lockdep_set_class_and_name(lock
, &netdev_xmit_lock_key
[i
],
327 netdev_lock_name
[i
]);
330 static inline void netdev_set_addr_lockdep_class(struct net_device
*dev
)
334 i
= netdev_lock_pos(dev
->type
);
335 lockdep_set_class_and_name(&dev
->addr_list_lock
,
336 &netdev_addr_lock_key
[i
],
337 netdev_lock_name
[i
]);
340 static inline void netdev_set_xmit_lockdep_class(spinlock_t
*lock
,
341 unsigned short dev_type
)
344 static inline void netdev_set_addr_lockdep_class(struct net_device
*dev
)
349 /*******************************************************************************
351 Protocol management and registration routines
353 *******************************************************************************/
356 * Add a protocol ID to the list. Now that the input handler is
357 * smarter we can dispense with all the messy stuff that used to be
360 * BEWARE!!! Protocol handlers, mangling input packets,
361 * MUST BE last in hash buckets and checking protocol handlers
362 * MUST start from promiscuous ptype_all chain in net_bh.
363 * It is true now, do not change it.
364 * Explanation follows: if protocol handler, mangling packet, will
365 * be the first on list, it is not able to sense, that packet
366 * is cloned and should be copied-on-write, so that it will
367 * change it and subsequent readers will get broken packet.
371 static inline struct list_head
*ptype_head(const struct packet_type
*pt
)
373 if (pt
->type
== htons(ETH_P_ALL
))
374 return pt
->dev
? &pt
->dev
->ptype_all
: &ptype_all
;
376 return pt
->dev
? &pt
->dev
->ptype_specific
:
377 &ptype_base
[ntohs(pt
->type
) & PTYPE_HASH_MASK
];
381 * dev_add_pack - add packet handler
382 * @pt: packet type declaration
384 * Add a protocol handler to the networking stack. The passed &packet_type
385 * is linked into kernel lists and may not be freed until it has been
386 * removed from the kernel lists.
388 * This call does not sleep therefore it can not
389 * guarantee all CPU's that are in middle of receiving packets
390 * will see the new packet type (until the next received packet).
393 void dev_add_pack(struct packet_type
*pt
)
395 struct list_head
*head
= ptype_head(pt
);
397 spin_lock(&ptype_lock
);
398 list_add_rcu(&pt
->list
, head
);
399 spin_unlock(&ptype_lock
);
401 EXPORT_SYMBOL(dev_add_pack
);
404 * __dev_remove_pack - remove packet handler
405 * @pt: packet type declaration
407 * Remove a protocol handler that was previously added to the kernel
408 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
409 * from the kernel lists and can be freed or reused once this function
412 * The packet type might still be in use by receivers
413 * and must not be freed until after all the CPU's have gone
414 * through a quiescent state.
416 void __dev_remove_pack(struct packet_type
*pt
)
418 struct list_head
*head
= ptype_head(pt
);
419 struct packet_type
*pt1
;
421 spin_lock(&ptype_lock
);
423 list_for_each_entry(pt1
, head
, list
) {
425 list_del_rcu(&pt
->list
);
430 pr_warn("dev_remove_pack: %p not found\n", pt
);
432 spin_unlock(&ptype_lock
);
434 EXPORT_SYMBOL(__dev_remove_pack
);
437 * dev_remove_pack - remove packet handler
438 * @pt: packet type declaration
440 * Remove a protocol handler that was previously added to the kernel
441 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
442 * from the kernel lists and can be freed or reused once this function
445 * This call sleeps to guarantee that no CPU is looking at the packet
448 void dev_remove_pack(struct packet_type
*pt
)
450 __dev_remove_pack(pt
);
454 EXPORT_SYMBOL(dev_remove_pack
);
458 * dev_add_offload - register offload handlers
459 * @po: protocol offload declaration
461 * Add protocol offload handlers to the networking stack. The passed
462 * &proto_offload is linked into kernel lists and may not be freed until
463 * it has been removed from the kernel lists.
465 * This call does not sleep therefore it can not
466 * guarantee all CPU's that are in middle of receiving packets
467 * will see the new offload handlers (until the next received packet).
469 void dev_add_offload(struct packet_offload
*po
)
471 struct list_head
*head
= &offload_base
;
473 spin_lock(&offload_lock
);
474 list_add_rcu(&po
->list
, head
);
475 spin_unlock(&offload_lock
);
477 EXPORT_SYMBOL(dev_add_offload
);
480 * __dev_remove_offload - remove offload handler
481 * @po: packet offload declaration
483 * Remove a protocol offload handler that was previously added to the
484 * kernel offload handlers by dev_add_offload(). The passed &offload_type
485 * is removed from the kernel lists and can be freed or reused once this
488 * The packet type might still be in use by receivers
489 * and must not be freed until after all the CPU's have gone
490 * through a quiescent state.
492 static void __dev_remove_offload(struct packet_offload
*po
)
494 struct list_head
*head
= &offload_base
;
495 struct packet_offload
*po1
;
497 spin_lock(&offload_lock
);
499 list_for_each_entry(po1
, head
, list
) {
501 list_del_rcu(&po
->list
);
506 pr_warn("dev_remove_offload: %p not found\n", po
);
508 spin_unlock(&offload_lock
);
512 * dev_remove_offload - remove packet offload handler
513 * @po: packet offload declaration
515 * Remove a packet offload handler that was previously added to the kernel
516 * offload handlers by dev_add_offload(). The passed &offload_type is
517 * removed from the kernel lists and can be freed or reused once this
520 * This call sleeps to guarantee that no CPU is looking at the packet
523 void dev_remove_offload(struct packet_offload
*po
)
525 __dev_remove_offload(po
);
529 EXPORT_SYMBOL(dev_remove_offload
);
531 /******************************************************************************
533 Device Boot-time Settings Routines
535 *******************************************************************************/
537 /* Boot time configuration table */
538 static struct netdev_boot_setup dev_boot_setup
[NETDEV_BOOT_SETUP_MAX
];
541 * netdev_boot_setup_add - add new setup entry
542 * @name: name of the device
543 * @map: configured settings for the device
545 * Adds new setup entry to the dev_boot_setup list. The function
546 * returns 0 on error and 1 on success. This is a generic routine to
549 static int netdev_boot_setup_add(char *name
, struct ifmap
*map
)
551 struct netdev_boot_setup
*s
;
555 for (i
= 0; i
< NETDEV_BOOT_SETUP_MAX
; i
++) {
556 if (s
[i
].name
[0] == '\0' || s
[i
].name
[0] == ' ') {
557 memset(s
[i
].name
, 0, sizeof(s
[i
].name
));
558 strlcpy(s
[i
].name
, name
, IFNAMSIZ
);
559 memcpy(&s
[i
].map
, map
, sizeof(s
[i
].map
));
564 return i
>= NETDEV_BOOT_SETUP_MAX
? 0 : 1;
568 * netdev_boot_setup_check - check boot time settings
569 * @dev: the netdevice
571 * Check boot time settings for the device.
572 * The found settings are set for the device to be used
573 * later in the device probing.
574 * Returns 0 if no settings found, 1 if they are.
576 int netdev_boot_setup_check(struct net_device
*dev
)
578 struct netdev_boot_setup
*s
= dev_boot_setup
;
581 for (i
= 0; i
< NETDEV_BOOT_SETUP_MAX
; i
++) {
582 if (s
[i
].name
[0] != '\0' && s
[i
].name
[0] != ' ' &&
583 !strcmp(dev
->name
, s
[i
].name
)) {
584 dev
->irq
= s
[i
].map
.irq
;
585 dev
->base_addr
= s
[i
].map
.base_addr
;
586 dev
->mem_start
= s
[i
].map
.mem_start
;
587 dev
->mem_end
= s
[i
].map
.mem_end
;
593 EXPORT_SYMBOL(netdev_boot_setup_check
);
597 * netdev_boot_base - get address from boot time settings
598 * @prefix: prefix for network device
599 * @unit: id for network device
601 * Check boot time settings for the base address of device.
602 * The found settings are set for the device to be used
603 * later in the device probing.
604 * Returns 0 if no settings found.
606 unsigned long netdev_boot_base(const char *prefix
, int unit
)
608 const struct netdev_boot_setup
*s
= dev_boot_setup
;
612 sprintf(name
, "%s%d", prefix
, unit
);
615 * If device already registered then return base of 1
616 * to indicate not to probe for this interface
618 if (__dev_get_by_name(&init_net
, name
))
621 for (i
= 0; i
< NETDEV_BOOT_SETUP_MAX
; i
++)
622 if (!strcmp(name
, s
[i
].name
))
623 return s
[i
].map
.base_addr
;
628 * Saves at boot time configured settings for any netdevice.
630 int __init
netdev_boot_setup(char *str
)
635 str
= get_options(str
, ARRAY_SIZE(ints
), ints
);
640 memset(&map
, 0, sizeof(map
));
644 map
.base_addr
= ints
[2];
646 map
.mem_start
= ints
[3];
648 map
.mem_end
= ints
[4];
650 /* Add new entry to the list */
651 return netdev_boot_setup_add(str
, &map
);
654 __setup("netdev=", netdev_boot_setup
);
656 /*******************************************************************************
658 Device Interface Subroutines
660 *******************************************************************************/
663 * __dev_get_by_name - find a device by its name
664 * @net: the applicable net namespace
665 * @name: name to find
667 * Find an interface by name. Must be called under RTNL semaphore
668 * or @dev_base_lock. If the name is found a pointer to the device
669 * is returned. If the name is not found then %NULL is returned. The
670 * reference counters are not incremented so the caller must be
671 * careful with locks.
674 struct net_device
*__dev_get_by_name(struct net
*net
, const char *name
)
676 struct net_device
*dev
;
677 struct hlist_head
*head
= dev_name_hash(net
, name
);
679 hlist_for_each_entry(dev
, head
, name_hlist
)
680 if (!strncmp(dev
->name
, name
, IFNAMSIZ
))
685 EXPORT_SYMBOL(__dev_get_by_name
);
688 * dev_get_by_name_rcu - find a device by its name
689 * @net: the applicable net namespace
690 * @name: name to find
692 * Find an interface by name.
693 * If the name is found a pointer to the device is returned.
694 * If the name is not found then %NULL is returned.
695 * The reference counters are not incremented so the caller must be
696 * careful with locks. The caller must hold RCU lock.
699 struct net_device
*dev_get_by_name_rcu(struct net
*net
, const char *name
)
701 struct net_device
*dev
;
702 struct hlist_head
*head
= dev_name_hash(net
, name
);
704 hlist_for_each_entry_rcu(dev
, head
, name_hlist
)
705 if (!strncmp(dev
->name
, name
, IFNAMSIZ
))
710 EXPORT_SYMBOL(dev_get_by_name_rcu
);
713 * dev_get_by_name - find a device by its name
714 * @net: the applicable net namespace
715 * @name: name to find
717 * Find an interface by name. This can be called from any
718 * context and does its own locking. The returned handle has
719 * the usage count incremented and the caller must use dev_put() to
720 * release it when it is no longer needed. %NULL is returned if no
721 * matching device is found.
724 struct net_device
*dev_get_by_name(struct net
*net
, const char *name
)
726 struct net_device
*dev
;
729 dev
= dev_get_by_name_rcu(net
, name
);
735 EXPORT_SYMBOL(dev_get_by_name
);
738 * __dev_get_by_index - find a device by its ifindex
739 * @net: the applicable net namespace
740 * @ifindex: index of device
742 * Search for an interface by index. Returns %NULL if the device
743 * is not found or a pointer to the device. The device has not
744 * had its reference counter increased so the caller must be careful
745 * about locking. The caller must hold either the RTNL semaphore
749 struct net_device
*__dev_get_by_index(struct net
*net
, int ifindex
)
751 struct net_device
*dev
;
752 struct hlist_head
*head
= dev_index_hash(net
, ifindex
);
754 hlist_for_each_entry(dev
, head
, index_hlist
)
755 if (dev
->ifindex
== ifindex
)
760 EXPORT_SYMBOL(__dev_get_by_index
);
763 * dev_get_by_index_rcu - find a device by its ifindex
764 * @net: the applicable net namespace
765 * @ifindex: index of device
767 * Search for an interface by index. Returns %NULL if the device
768 * is not found or a pointer to the device. The device has not
769 * had its reference counter increased so the caller must be careful
770 * about locking. The caller must hold RCU lock.
773 struct net_device
*dev_get_by_index_rcu(struct net
*net
, int ifindex
)
775 struct net_device
*dev
;
776 struct hlist_head
*head
= dev_index_hash(net
, ifindex
);
778 hlist_for_each_entry_rcu(dev
, head
, index_hlist
)
779 if (dev
->ifindex
== ifindex
)
784 EXPORT_SYMBOL(dev_get_by_index_rcu
);
788 * dev_get_by_index - find a device by its ifindex
789 * @net: the applicable net namespace
790 * @ifindex: index of device
792 * Search for an interface by index. Returns NULL if the device
793 * is not found or a pointer to the device. The device returned has
794 * had a reference added and the pointer is safe until the user calls
795 * dev_put to indicate they have finished with it.
798 struct net_device
*dev_get_by_index(struct net
*net
, int ifindex
)
800 struct net_device
*dev
;
803 dev
= dev_get_by_index_rcu(net
, ifindex
);
809 EXPORT_SYMBOL(dev_get_by_index
);
812 * netdev_get_name - get a netdevice name, knowing its ifindex.
813 * @net: network namespace
814 * @name: a pointer to the buffer where the name will be stored.
815 * @ifindex: the ifindex of the interface to get the name from.
817 * The use of raw_seqcount_begin() and cond_resched() before
818 * retrying is required as we want to give the writers a chance
819 * to complete when CONFIG_PREEMPT is not set.
821 int netdev_get_name(struct net
*net
, char *name
, int ifindex
)
823 struct net_device
*dev
;
827 seq
= raw_seqcount_begin(&devnet_rename_seq
);
829 dev
= dev_get_by_index_rcu(net
, ifindex
);
835 strcpy(name
, dev
->name
);
837 if (read_seqcount_retry(&devnet_rename_seq
, seq
)) {
846 * dev_getbyhwaddr_rcu - find a device by its hardware address
847 * @net: the applicable net namespace
848 * @type: media type of device
849 * @ha: hardware address
851 * Search for an interface by MAC address. Returns NULL if the device
852 * is not found or a pointer to the device.
853 * The caller must hold RCU or RTNL.
854 * The returned device has not had its ref count increased
855 * and the caller must therefore be careful about locking
859 struct net_device
*dev_getbyhwaddr_rcu(struct net
*net
, unsigned short type
,
862 struct net_device
*dev
;
864 for_each_netdev_rcu(net
, dev
)
865 if (dev
->type
== type
&&
866 !memcmp(dev
->dev_addr
, ha
, dev
->addr_len
))
871 EXPORT_SYMBOL(dev_getbyhwaddr_rcu
);
873 struct net_device
*__dev_getfirstbyhwtype(struct net
*net
, unsigned short type
)
875 struct net_device
*dev
;
878 for_each_netdev(net
, dev
)
879 if (dev
->type
== type
)
884 EXPORT_SYMBOL(__dev_getfirstbyhwtype
);
886 struct net_device
*dev_getfirstbyhwtype(struct net
*net
, unsigned short type
)
888 struct net_device
*dev
, *ret
= NULL
;
891 for_each_netdev_rcu(net
, dev
)
892 if (dev
->type
== type
) {
900 EXPORT_SYMBOL(dev_getfirstbyhwtype
);
903 * __dev_get_by_flags - find any device with given flags
904 * @net: the applicable net namespace
905 * @if_flags: IFF_* values
906 * @mask: bitmask of bits in if_flags to check
908 * Search for any interface with the given flags. Returns NULL if a device
909 * is not found or a pointer to the device. Must be called inside
910 * rtnl_lock(), and result refcount is unchanged.
913 struct net_device
*__dev_get_by_flags(struct net
*net
, unsigned short if_flags
,
916 struct net_device
*dev
, *ret
;
921 for_each_netdev(net
, dev
) {
922 if (((dev
->flags
^ if_flags
) & mask
) == 0) {
929 EXPORT_SYMBOL(__dev_get_by_flags
);
932 * dev_valid_name - check if name is okay for network device
935 * Network device names need to be valid file names to
936 * to allow sysfs to work. We also disallow any kind of
939 bool dev_valid_name(const char *name
)
943 if (strlen(name
) >= IFNAMSIZ
)
945 if (!strcmp(name
, ".") || !strcmp(name
, ".."))
949 if (*name
== '/' || *name
== ':' || isspace(*name
))
955 EXPORT_SYMBOL(dev_valid_name
);
958 * __dev_alloc_name - allocate a name for a device
959 * @net: network namespace to allocate the device name in
960 * @name: name format string
961 * @buf: scratch buffer and result name string
963 * Passed a format string - eg "lt%d" it will try and find a suitable
964 * id. It scans list of devices to build up a free map, then chooses
965 * the first empty slot. The caller must hold the dev_base or rtnl lock
966 * while allocating the name and adding the device in order to avoid
968 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
969 * Returns the number of the unit assigned or a negative errno code.
972 static int __dev_alloc_name(struct net
*net
, const char *name
, char *buf
)
976 const int max_netdevices
= 8*PAGE_SIZE
;
977 unsigned long *inuse
;
978 struct net_device
*d
;
980 p
= strnchr(name
, IFNAMSIZ
-1, '%');
983 * Verify the string as this thing may have come from
984 * the user. There must be either one "%d" and no other "%"
987 if (p
[1] != 'd' || strchr(p
+ 2, '%'))
990 /* Use one page as a bit array of possible slots */
991 inuse
= (unsigned long *) get_zeroed_page(GFP_ATOMIC
);
995 for_each_netdev(net
, d
) {
996 if (!sscanf(d
->name
, name
, &i
))
998 if (i
< 0 || i
>= max_netdevices
)
1001 /* avoid cases where sscanf is not exact inverse of printf */
1002 snprintf(buf
, IFNAMSIZ
, name
, i
);
1003 if (!strncmp(buf
, d
->name
, IFNAMSIZ
))
1007 i
= find_first_zero_bit(inuse
, max_netdevices
);
1008 free_page((unsigned long) inuse
);
1012 snprintf(buf
, IFNAMSIZ
, name
, i
);
1013 if (!__dev_get_by_name(net
, buf
))
1016 /* It is possible to run out of possible slots
1017 * when the name is long and there isn't enough space left
1018 * for the digits, or if all bits are used.
1024 * dev_alloc_name - allocate a name for a device
1026 * @name: name format string
1028 * Passed a format string - eg "lt%d" it will try and find a suitable
1029 * id. It scans list of devices to build up a free map, then chooses
1030 * the first empty slot. The caller must hold the dev_base or rtnl lock
1031 * while allocating the name and adding the device in order to avoid
1033 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1034 * Returns the number of the unit assigned or a negative errno code.
1037 int dev_alloc_name(struct net_device
*dev
, const char *name
)
1043 BUG_ON(!dev_net(dev
));
1045 ret
= __dev_alloc_name(net
, name
, buf
);
1047 strlcpy(dev
->name
, buf
, IFNAMSIZ
);
1050 EXPORT_SYMBOL(dev_alloc_name
);
1052 static int dev_alloc_name_ns(struct net
*net
,
1053 struct net_device
*dev
,
1059 ret
= __dev_alloc_name(net
, name
, buf
);
1061 strlcpy(dev
->name
, buf
, IFNAMSIZ
);
1065 static int dev_get_valid_name(struct net
*net
,
1066 struct net_device
*dev
,
1071 if (!dev_valid_name(name
))
1074 if (strchr(name
, '%'))
1075 return dev_alloc_name_ns(net
, dev
, name
);
1076 else if (__dev_get_by_name(net
, name
))
1078 else if (dev
->name
!= name
)
1079 strlcpy(dev
->name
, name
, IFNAMSIZ
);
1085 * dev_change_name - change name of a device
1087 * @newname: name (or format string) must be at least IFNAMSIZ
1089 * Change name of a device, can pass format strings "eth%d".
1092 int dev_change_name(struct net_device
*dev
, const char *newname
)
1094 unsigned char old_assign_type
;
1095 char oldname
[IFNAMSIZ
];
1101 BUG_ON(!dev_net(dev
));
1104 if (dev
->flags
& IFF_UP
)
1107 write_seqcount_begin(&devnet_rename_seq
);
1109 if (strncmp(newname
, dev
->name
, IFNAMSIZ
) == 0) {
1110 write_seqcount_end(&devnet_rename_seq
);
1114 memcpy(oldname
, dev
->name
, IFNAMSIZ
);
1116 err
= dev_get_valid_name(net
, dev
, newname
);
1118 write_seqcount_end(&devnet_rename_seq
);
1122 if (oldname
[0] && !strchr(oldname
, '%'))
1123 netdev_info(dev
, "renamed from %s\n", oldname
);
1125 old_assign_type
= dev
->name_assign_type
;
1126 dev
->name_assign_type
= NET_NAME_RENAMED
;
1129 ret
= device_rename(&dev
->dev
, dev
->name
);
1131 memcpy(dev
->name
, oldname
, IFNAMSIZ
);
1132 dev
->name_assign_type
= old_assign_type
;
1133 write_seqcount_end(&devnet_rename_seq
);
1137 write_seqcount_end(&devnet_rename_seq
);
1139 netdev_adjacent_rename_links(dev
, oldname
);
1141 write_lock_bh(&dev_base_lock
);
1142 hlist_del_rcu(&dev
->name_hlist
);
1143 write_unlock_bh(&dev_base_lock
);
1147 write_lock_bh(&dev_base_lock
);
1148 hlist_add_head_rcu(&dev
->name_hlist
, dev_name_hash(net
, dev
->name
));
1149 write_unlock_bh(&dev_base_lock
);
1151 ret
= call_netdevice_notifiers(NETDEV_CHANGENAME
, dev
);
1152 ret
= notifier_to_errno(ret
);
1155 /* err >= 0 after dev_alloc_name() or stores the first errno */
1158 write_seqcount_begin(&devnet_rename_seq
);
1159 memcpy(dev
->name
, oldname
, IFNAMSIZ
);
1160 memcpy(oldname
, newname
, IFNAMSIZ
);
1161 dev
->name_assign_type
= old_assign_type
;
1162 old_assign_type
= NET_NAME_RENAMED
;
1165 pr_err("%s: name change rollback failed: %d\n",
1174 * dev_set_alias - change ifalias of a device
1176 * @alias: name up to IFALIASZ
1177 * @len: limit of bytes to copy from info
1179 * Set ifalias for a device,
1181 int dev_set_alias(struct net_device
*dev
, const char *alias
, size_t len
)
1187 if (len
>= IFALIASZ
)
1191 kfree(dev
->ifalias
);
1192 dev
->ifalias
= NULL
;
1196 new_ifalias
= krealloc(dev
->ifalias
, len
+ 1, GFP_KERNEL
);
1199 dev
->ifalias
= new_ifalias
;
1201 strlcpy(dev
->ifalias
, alias
, len
+1);
1207 * netdev_features_change - device changes features
1208 * @dev: device to cause notification
1210 * Called to indicate a device has changed features.
1212 void netdev_features_change(struct net_device
*dev
)
1214 call_netdevice_notifiers(NETDEV_FEAT_CHANGE
, dev
);
1216 EXPORT_SYMBOL(netdev_features_change
);
1219 * netdev_state_change - device changes state
1220 * @dev: device to cause notification
1222 * Called to indicate a device has changed state. This function calls
1223 * the notifier chains for netdev_chain and sends a NEWLINK message
1224 * to the routing socket.
1226 void netdev_state_change(struct net_device
*dev
)
1228 if (dev
->flags
& IFF_UP
) {
1229 struct netdev_notifier_change_info change_info
;
1231 change_info
.flags_changed
= 0;
1232 call_netdevice_notifiers_info(NETDEV_CHANGE
, dev
,
1234 rtmsg_ifinfo(RTM_NEWLINK
, dev
, 0, GFP_KERNEL
);
1237 EXPORT_SYMBOL(netdev_state_change
);
1240 * netdev_notify_peers - notify network peers about existence of @dev
1241 * @dev: network device
1243 * Generate traffic such that interested network peers are aware of
1244 * @dev, such as by generating a gratuitous ARP. This may be used when
1245 * a device wants to inform the rest of the network about some sort of
1246 * reconfiguration such as a failover event or virtual machine
1249 void netdev_notify_peers(struct net_device
*dev
)
1252 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS
, dev
);
1255 EXPORT_SYMBOL(netdev_notify_peers
);
1257 static int __dev_open(struct net_device
*dev
)
1259 const struct net_device_ops
*ops
= dev
->netdev_ops
;
1264 if (!netif_device_present(dev
))
1267 /* Block netpoll from trying to do any rx path servicing.
1268 * If we don't do this there is a chance ndo_poll_controller
1269 * or ndo_poll may be running while we open the device
1271 netpoll_poll_disable(dev
);
1273 ret
= call_netdevice_notifiers(NETDEV_PRE_UP
, dev
);
1274 ret
= notifier_to_errno(ret
);
1278 set_bit(__LINK_STATE_START
, &dev
->state
);
1280 if (ops
->ndo_validate_addr
)
1281 ret
= ops
->ndo_validate_addr(dev
);
1283 if (!ret
&& ops
->ndo_open
)
1284 ret
= ops
->ndo_open(dev
);
1286 netpoll_poll_enable(dev
);
1289 clear_bit(__LINK_STATE_START
, &dev
->state
);
1291 dev
->flags
|= IFF_UP
;
1292 dev_set_rx_mode(dev
);
1294 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
1301 * dev_open - prepare an interface for use.
1302 * @dev: device to open
1304 * Takes a device from down to up state. The device's private open
1305 * function is invoked and then the multicast lists are loaded. Finally
1306 * the device is moved into the up state and a %NETDEV_UP message is
1307 * sent to the netdev notifier chain.
1309 * Calling this function on an active interface is a nop. On a failure
1310 * a negative errno code is returned.
1312 int dev_open(struct net_device
*dev
)
1316 if (dev
->flags
& IFF_UP
)
1319 ret
= __dev_open(dev
);
1323 rtmsg_ifinfo(RTM_NEWLINK
, dev
, IFF_UP
|IFF_RUNNING
, GFP_KERNEL
);
1324 call_netdevice_notifiers(NETDEV_UP
, dev
);
1328 EXPORT_SYMBOL(dev_open
);
1330 static int __dev_close_many(struct list_head
*head
)
1332 struct net_device
*dev
;
1337 list_for_each_entry(dev
, head
, close_list
) {
1338 /* Temporarily disable netpoll until the interface is down */
1339 netpoll_poll_disable(dev
);
1341 call_netdevice_notifiers(NETDEV_GOING_DOWN
, dev
);
1343 clear_bit(__LINK_STATE_START
, &dev
->state
);
1345 /* Synchronize to scheduled poll. We cannot touch poll list, it
1346 * can be even on different cpu. So just clear netif_running().
1348 * dev->stop() will invoke napi_disable() on all of it's
1349 * napi_struct instances on this device.
1351 smp_mb__after_atomic(); /* Commit netif_running(). */
1354 dev_deactivate_many(head
);
1356 list_for_each_entry(dev
, head
, close_list
) {
1357 const struct net_device_ops
*ops
= dev
->netdev_ops
;
1360 * Call the device specific close. This cannot fail.
1361 * Only if device is UP
1363 * We allow it to be called even after a DETACH hot-plug
1369 dev
->flags
&= ~IFF_UP
;
1370 netpoll_poll_enable(dev
);
1376 static int __dev_close(struct net_device
*dev
)
1381 list_add(&dev
->close_list
, &single
);
1382 retval
= __dev_close_many(&single
);
1388 int dev_close_many(struct list_head
*head
, bool unlink
)
1390 struct net_device
*dev
, *tmp
;
1392 /* Remove the devices that don't need to be closed */
1393 list_for_each_entry_safe(dev
, tmp
, head
, close_list
)
1394 if (!(dev
->flags
& IFF_UP
))
1395 list_del_init(&dev
->close_list
);
1397 __dev_close_many(head
);
1399 list_for_each_entry_safe(dev
, tmp
, head
, close_list
) {
1400 rtmsg_ifinfo(RTM_NEWLINK
, dev
, IFF_UP
|IFF_RUNNING
, GFP_KERNEL
);
1401 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
1403 list_del_init(&dev
->close_list
);
1408 EXPORT_SYMBOL(dev_close_many
);
1411 * dev_close - shutdown an interface.
1412 * @dev: device to shutdown
1414 * This function moves an active device into down state. A
1415 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1416 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1419 int dev_close(struct net_device
*dev
)
1421 if (dev
->flags
& IFF_UP
) {
1424 list_add(&dev
->close_list
, &single
);
1425 dev_close_many(&single
, true);
1430 EXPORT_SYMBOL(dev_close
);
1434 * dev_disable_lro - disable Large Receive Offload on a device
1437 * Disable Large Receive Offload (LRO) on a net device. Must be
1438 * called under RTNL. This is needed if received packets may be
1439 * forwarded to another interface.
1441 void dev_disable_lro(struct net_device
*dev
)
1443 struct net_device
*lower_dev
;
1444 struct list_head
*iter
;
1446 dev
->wanted_features
&= ~NETIF_F_LRO
;
1447 netdev_update_features(dev
);
1449 if (unlikely(dev
->features
& NETIF_F_LRO
))
1450 netdev_WARN(dev
, "failed to disable LRO!\n");
1452 netdev_for_each_lower_dev(dev
, lower_dev
, iter
)
1453 dev_disable_lro(lower_dev
);
1455 EXPORT_SYMBOL(dev_disable_lro
);
1457 static int call_netdevice_notifier(struct notifier_block
*nb
, unsigned long val
,
1458 struct net_device
*dev
)
1460 struct netdev_notifier_info info
;
1462 netdev_notifier_info_init(&info
, dev
);
1463 return nb
->notifier_call(nb
, val
, &info
);
1466 static int dev_boot_phase
= 1;
1469 * register_netdevice_notifier - register a network notifier block
1472 * Register a notifier to be called when network device events occur.
1473 * The notifier passed is linked into the kernel structures and must
1474 * not be reused until it has been unregistered. A negative errno code
1475 * is returned on a failure.
1477 * When registered all registration and up events are replayed
1478 * to the new notifier to allow device to have a race free
1479 * view of the network device list.
1482 int register_netdevice_notifier(struct notifier_block
*nb
)
1484 struct net_device
*dev
;
1485 struct net_device
*last
;
1490 err
= raw_notifier_chain_register(&netdev_chain
, nb
);
1496 for_each_netdev(net
, dev
) {
1497 err
= call_netdevice_notifier(nb
, NETDEV_REGISTER
, dev
);
1498 err
= notifier_to_errno(err
);
1502 if (!(dev
->flags
& IFF_UP
))
1505 call_netdevice_notifier(nb
, NETDEV_UP
, dev
);
1516 for_each_netdev(net
, dev
) {
1520 if (dev
->flags
& IFF_UP
) {
1521 call_netdevice_notifier(nb
, NETDEV_GOING_DOWN
,
1523 call_netdevice_notifier(nb
, NETDEV_DOWN
, dev
);
1525 call_netdevice_notifier(nb
, NETDEV_UNREGISTER
, dev
);
1530 raw_notifier_chain_unregister(&netdev_chain
, nb
);
1533 EXPORT_SYMBOL(register_netdevice_notifier
);
1536 * unregister_netdevice_notifier - unregister a network notifier block
1539 * Unregister a notifier previously registered by
1540 * register_netdevice_notifier(). The notifier is unlinked into the
1541 * kernel structures and may then be reused. A negative errno code
1542 * is returned on a failure.
1544 * After unregistering unregister and down device events are synthesized
1545 * for all devices on the device list to the removed notifier to remove
1546 * the need for special case cleanup code.
1549 int unregister_netdevice_notifier(struct notifier_block
*nb
)
1551 struct net_device
*dev
;
1556 err
= raw_notifier_chain_unregister(&netdev_chain
, nb
);
1561 for_each_netdev(net
, dev
) {
1562 if (dev
->flags
& IFF_UP
) {
1563 call_netdevice_notifier(nb
, NETDEV_GOING_DOWN
,
1565 call_netdevice_notifier(nb
, NETDEV_DOWN
, dev
);
1567 call_netdevice_notifier(nb
, NETDEV_UNREGISTER
, dev
);
1574 EXPORT_SYMBOL(unregister_netdevice_notifier
);
1577 * call_netdevice_notifiers_info - call all network notifier blocks
1578 * @val: value passed unmodified to notifier function
1579 * @dev: net_device pointer passed unmodified to notifier function
1580 * @info: notifier information data
1582 * Call all network notifier blocks. Parameters and return value
1583 * are as for raw_notifier_call_chain().
1586 static int call_netdevice_notifiers_info(unsigned long val
,
1587 struct net_device
*dev
,
1588 struct netdev_notifier_info
*info
)
1591 netdev_notifier_info_init(info
, dev
);
1592 return raw_notifier_call_chain(&netdev_chain
, val
, info
);
1596 * call_netdevice_notifiers - call all network notifier blocks
1597 * @val: value passed unmodified to notifier function
1598 * @dev: net_device pointer passed unmodified to notifier function
1600 * Call all network notifier blocks. Parameters and return value
1601 * are as for raw_notifier_call_chain().
1604 int call_netdevice_notifiers(unsigned long val
, struct net_device
*dev
)
1606 struct netdev_notifier_info info
;
1608 return call_netdevice_notifiers_info(val
, dev
, &info
);
1610 EXPORT_SYMBOL(call_netdevice_notifiers
);
1612 static struct static_key netstamp_needed __read_mostly
;
1613 #ifdef HAVE_JUMP_LABEL
1614 /* We are not allowed to call static_key_slow_dec() from irq context
1615 * If net_disable_timestamp() is called from irq context, defer the
1616 * static_key_slow_dec() calls.
1618 static atomic_t netstamp_needed_deferred
;
1621 void net_enable_timestamp(void)
1623 #ifdef HAVE_JUMP_LABEL
1624 int deferred
= atomic_xchg(&netstamp_needed_deferred
, 0);
1628 static_key_slow_dec(&netstamp_needed
);
1632 static_key_slow_inc(&netstamp_needed
);
1634 EXPORT_SYMBOL(net_enable_timestamp
);
1636 void net_disable_timestamp(void)
1638 #ifdef HAVE_JUMP_LABEL
1639 if (in_interrupt()) {
1640 atomic_inc(&netstamp_needed_deferred
);
1644 static_key_slow_dec(&netstamp_needed
);
1646 EXPORT_SYMBOL(net_disable_timestamp
);
1648 static inline void net_timestamp_set(struct sk_buff
*skb
)
1650 skb
->tstamp
.tv64
= 0;
1651 if (static_key_false(&netstamp_needed
))
1652 __net_timestamp(skb
);
1655 #define net_timestamp_check(COND, SKB) \
1656 if (static_key_false(&netstamp_needed)) { \
1657 if ((COND) && !(SKB)->tstamp.tv64) \
1658 __net_timestamp(SKB); \
1661 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1665 if (!(dev
->flags
& IFF_UP
))
1668 len
= dev
->mtu
+ dev
->hard_header_len
+ VLAN_HLEN
;
1669 if (skb
->len
<= len
)
1672 /* if TSO is enabled, we don't care about the length as the packet
1673 * could be forwarded without being segmented before
1675 if (skb_is_gso(skb
))
1680 EXPORT_SYMBOL_GPL(is_skb_forwardable
);
1682 int __dev_forward_skb(struct net_device
*dev
, struct sk_buff
*skb
)
1684 if (skb_shinfo(skb
)->tx_flags
& SKBTX_DEV_ZEROCOPY
) {
1685 if (skb_copy_ubufs(skb
, GFP_ATOMIC
)) {
1686 atomic_long_inc(&dev
->rx_dropped
);
1692 if (unlikely(!is_skb_forwardable(dev
, skb
))) {
1693 atomic_long_inc(&dev
->rx_dropped
);
1698 skb_scrub_packet(skb
, true);
1700 skb
->protocol
= eth_type_trans(skb
, dev
);
1701 skb_postpull_rcsum(skb
, eth_hdr(skb
), ETH_HLEN
);
1705 EXPORT_SYMBOL_GPL(__dev_forward_skb
);
1708 * dev_forward_skb - loopback an skb to another netif
1710 * @dev: destination network device
1711 * @skb: buffer to forward
1714 * NET_RX_SUCCESS (no congestion)
1715 * NET_RX_DROP (packet was dropped, but freed)
1717 * dev_forward_skb can be used for injecting an skb from the
1718 * start_xmit function of one device into the receive queue
1719 * of another device.
1721 * The receiving device may be in another namespace, so
1722 * we have to clear all information in the skb that could
1723 * impact namespace isolation.
1725 int dev_forward_skb(struct net_device
*dev
, struct sk_buff
*skb
)
1727 return __dev_forward_skb(dev
, skb
) ?: netif_rx_internal(skb
);
1729 EXPORT_SYMBOL_GPL(dev_forward_skb
);
1731 static inline int deliver_skb(struct sk_buff
*skb
,
1732 struct packet_type
*pt_prev
,
1733 struct net_device
*orig_dev
)
1735 if (unlikely(skb_orphan_frags(skb
, GFP_ATOMIC
)))
1737 atomic_inc(&skb
->users
);
1738 return pt_prev
->func(skb
, skb
->dev
, pt_prev
, orig_dev
);
1741 static inline void deliver_ptype_list_skb(struct sk_buff
*skb
,
1742 struct packet_type
**pt
,
1743 struct net_device
*dev
, __be16 type
,
1744 struct list_head
*ptype_list
)
1746 struct packet_type
*ptype
, *pt_prev
= *pt
;
1748 list_for_each_entry_rcu(ptype
, ptype_list
, list
) {
1749 if (ptype
->type
!= type
)
1752 deliver_skb(skb
, pt_prev
, dev
);
1758 static inline bool skb_loop_sk(struct packet_type
*ptype
, struct sk_buff
*skb
)
1760 if (!ptype
->af_packet_priv
|| !skb
->sk
)
1763 if (ptype
->id_match
)
1764 return ptype
->id_match(ptype
, skb
->sk
);
1765 else if ((struct sock
*)ptype
->af_packet_priv
== skb
->sk
)
1772 * Support routine. Sends outgoing frames to any network
1773 * taps currently in use.
1776 static void dev_queue_xmit_nit(struct sk_buff
*skb
, struct net_device
*dev
)
1778 struct packet_type
*ptype
;
1779 struct sk_buff
*skb2
= NULL
;
1780 struct packet_type
*pt_prev
= NULL
;
1781 struct list_head
*ptype_list
= &ptype_all
;
1785 list_for_each_entry_rcu(ptype
, ptype_list
, list
) {
1786 /* Never send packets back to the socket
1787 * they originated from - MvS (miquels@drinkel.ow.org)
1789 if (skb_loop_sk(ptype
, skb
))
1793 deliver_skb(skb2
, pt_prev
, skb
->dev
);
1798 /* need to clone skb, done only once */
1799 skb2
= skb_clone(skb
, GFP_ATOMIC
);
1803 net_timestamp_set(skb2
);
1805 /* skb->nh should be correctly
1806 * set by sender, so that the second statement is
1807 * just protection against buggy protocols.
1809 skb_reset_mac_header(skb2
);
1811 if (skb_network_header(skb2
) < skb2
->data
||
1812 skb_network_header(skb2
) > skb_tail_pointer(skb2
)) {
1813 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1814 ntohs(skb2
->protocol
),
1816 skb_reset_network_header(skb2
);
1819 skb2
->transport_header
= skb2
->network_header
;
1820 skb2
->pkt_type
= PACKET_OUTGOING
;
1824 if (ptype_list
== &ptype_all
) {
1825 ptype_list
= &dev
->ptype_all
;
1830 pt_prev
->func(skb2
, skb
->dev
, pt_prev
, skb
->dev
);
1835 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1836 * @dev: Network device
1837 * @txq: number of queues available
1839 * If real_num_tx_queues is changed the tc mappings may no longer be
1840 * valid. To resolve this verify the tc mapping remains valid and if
1841 * not NULL the mapping. With no priorities mapping to this
1842 * offset/count pair it will no longer be used. In the worst case TC0
1843 * is invalid nothing can be done so disable priority mappings. If is
1844 * expected that drivers will fix this mapping if they can before
1845 * calling netif_set_real_num_tx_queues.
1847 static void netif_setup_tc(struct net_device
*dev
, unsigned int txq
)
1850 struct netdev_tc_txq
*tc
= &dev
->tc_to_txq
[0];
1852 /* If TC0 is invalidated disable TC mapping */
1853 if (tc
->offset
+ tc
->count
> txq
) {
1854 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1859 /* Invalidated prio to tc mappings set to TC0 */
1860 for (i
= 1; i
< TC_BITMASK
+ 1; i
++) {
1861 int q
= netdev_get_prio_tc_map(dev
, i
);
1863 tc
= &dev
->tc_to_txq
[q
];
1864 if (tc
->offset
+ tc
->count
> txq
) {
1865 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1867 netdev_set_prio_tc_map(dev
, i
, 0);
1873 static DEFINE_MUTEX(xps_map_mutex
);
1874 #define xmap_dereference(P) \
1875 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1877 static struct xps_map
*remove_xps_queue(struct xps_dev_maps
*dev_maps
,
1880 struct xps_map
*map
= NULL
;
1884 map
= xmap_dereference(dev_maps
->cpu_map
[cpu
]);
1886 for (pos
= 0; map
&& pos
< map
->len
; pos
++) {
1887 if (map
->queues
[pos
] == index
) {
1889 map
->queues
[pos
] = map
->queues
[--map
->len
];
1891 RCU_INIT_POINTER(dev_maps
->cpu_map
[cpu
], NULL
);
1892 kfree_rcu(map
, rcu
);
1902 static void netif_reset_xps_queues_gt(struct net_device
*dev
, u16 index
)
1904 struct xps_dev_maps
*dev_maps
;
1906 bool active
= false;
1908 mutex_lock(&xps_map_mutex
);
1909 dev_maps
= xmap_dereference(dev
->xps_maps
);
1914 for_each_possible_cpu(cpu
) {
1915 for (i
= index
; i
< dev
->num_tx_queues
; i
++) {
1916 if (!remove_xps_queue(dev_maps
, cpu
, i
))
1919 if (i
== dev
->num_tx_queues
)
1924 RCU_INIT_POINTER(dev
->xps_maps
, NULL
);
1925 kfree_rcu(dev_maps
, rcu
);
1928 for (i
= index
; i
< dev
->num_tx_queues
; i
++)
1929 netdev_queue_numa_node_write(netdev_get_tx_queue(dev
, i
),
1933 mutex_unlock(&xps_map_mutex
);
1936 static struct xps_map
*expand_xps_map(struct xps_map
*map
,
1939 struct xps_map
*new_map
;
1940 int alloc_len
= XPS_MIN_MAP_ALLOC
;
1943 for (pos
= 0; map
&& pos
< map
->len
; pos
++) {
1944 if (map
->queues
[pos
] != index
)
1949 /* Need to add queue to this CPU's existing map */
1951 if (pos
< map
->alloc_len
)
1954 alloc_len
= map
->alloc_len
* 2;
1957 /* Need to allocate new map to store queue on this CPU's map */
1958 new_map
= kzalloc_node(XPS_MAP_SIZE(alloc_len
), GFP_KERNEL
,
1963 for (i
= 0; i
< pos
; i
++)
1964 new_map
->queues
[i
] = map
->queues
[i
];
1965 new_map
->alloc_len
= alloc_len
;
1971 int netif_set_xps_queue(struct net_device
*dev
, const struct cpumask
*mask
,
1974 struct xps_dev_maps
*dev_maps
, *new_dev_maps
= NULL
;
1975 struct xps_map
*map
, *new_map
;
1976 int maps_sz
= max_t(unsigned int, XPS_DEV_MAPS_SIZE
, L1_CACHE_BYTES
);
1977 int cpu
, numa_node_id
= -2;
1978 bool active
= false;
1980 mutex_lock(&xps_map_mutex
);
1982 dev_maps
= xmap_dereference(dev
->xps_maps
);
1984 /* allocate memory for queue storage */
1985 for_each_online_cpu(cpu
) {
1986 if (!cpumask_test_cpu(cpu
, mask
))
1990 new_dev_maps
= kzalloc(maps_sz
, GFP_KERNEL
);
1991 if (!new_dev_maps
) {
1992 mutex_unlock(&xps_map_mutex
);
1996 map
= dev_maps
? xmap_dereference(dev_maps
->cpu_map
[cpu
]) :
1999 map
= expand_xps_map(map
, cpu
, index
);
2003 RCU_INIT_POINTER(new_dev_maps
->cpu_map
[cpu
], map
);
2007 goto out_no_new_maps
;
2009 for_each_possible_cpu(cpu
) {
2010 if (cpumask_test_cpu(cpu
, mask
) && cpu_online(cpu
)) {
2011 /* add queue to CPU maps */
2014 map
= xmap_dereference(new_dev_maps
->cpu_map
[cpu
]);
2015 while ((pos
< map
->len
) && (map
->queues
[pos
] != index
))
2018 if (pos
== map
->len
)
2019 map
->queues
[map
->len
++] = index
;
2021 if (numa_node_id
== -2)
2022 numa_node_id
= cpu_to_node(cpu
);
2023 else if (numa_node_id
!= cpu_to_node(cpu
))
2026 } else if (dev_maps
) {
2027 /* fill in the new device map from the old device map */
2028 map
= xmap_dereference(dev_maps
->cpu_map
[cpu
]);
2029 RCU_INIT_POINTER(new_dev_maps
->cpu_map
[cpu
], map
);
2034 rcu_assign_pointer(dev
->xps_maps
, new_dev_maps
);
2036 /* Cleanup old maps */
2038 for_each_possible_cpu(cpu
) {
2039 new_map
= xmap_dereference(new_dev_maps
->cpu_map
[cpu
]);
2040 map
= xmap_dereference(dev_maps
->cpu_map
[cpu
]);
2041 if (map
&& map
!= new_map
)
2042 kfree_rcu(map
, rcu
);
2045 kfree_rcu(dev_maps
, rcu
);
2048 dev_maps
= new_dev_maps
;
2052 /* update Tx queue numa node */
2053 netdev_queue_numa_node_write(netdev_get_tx_queue(dev
, index
),
2054 (numa_node_id
>= 0) ? numa_node_id
:
2060 /* removes queue from unused CPUs */
2061 for_each_possible_cpu(cpu
) {
2062 if (cpumask_test_cpu(cpu
, mask
) && cpu_online(cpu
))
2065 if (remove_xps_queue(dev_maps
, cpu
, index
))
2069 /* free map if not active */
2071 RCU_INIT_POINTER(dev
->xps_maps
, NULL
);
2072 kfree_rcu(dev_maps
, rcu
);
2076 mutex_unlock(&xps_map_mutex
);
2080 /* remove any maps that we added */
2081 for_each_possible_cpu(cpu
) {
2082 new_map
= xmap_dereference(new_dev_maps
->cpu_map
[cpu
]);
2083 map
= dev_maps
? xmap_dereference(dev_maps
->cpu_map
[cpu
]) :
2085 if (new_map
&& new_map
!= map
)
2089 mutex_unlock(&xps_map_mutex
);
2091 kfree(new_dev_maps
);
2094 EXPORT_SYMBOL(netif_set_xps_queue
);
2098 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2099 * greater then real_num_tx_queues stale skbs on the qdisc must be flushed.
2101 int netif_set_real_num_tx_queues(struct net_device
*dev
, unsigned int txq
)
2105 if (txq
< 1 || txq
> dev
->num_tx_queues
)
2108 if (dev
->reg_state
== NETREG_REGISTERED
||
2109 dev
->reg_state
== NETREG_UNREGISTERING
) {
2112 rc
= netdev_queue_update_kobjects(dev
, dev
->real_num_tx_queues
,
2118 netif_setup_tc(dev
, txq
);
2120 if (txq
< dev
->real_num_tx_queues
) {
2121 qdisc_reset_all_tx_gt(dev
, txq
);
2123 netif_reset_xps_queues_gt(dev
, txq
);
2128 dev
->real_num_tx_queues
= txq
;
2131 EXPORT_SYMBOL(netif_set_real_num_tx_queues
);
2135 * netif_set_real_num_rx_queues - set actual number of RX queues used
2136 * @dev: Network device
2137 * @rxq: Actual number of RX queues
2139 * This must be called either with the rtnl_lock held or before
2140 * registration of the net device. Returns 0 on success, or a
2141 * negative error code. If called before registration, it always
2144 int netif_set_real_num_rx_queues(struct net_device
*dev
, unsigned int rxq
)
2148 if (rxq
< 1 || rxq
> dev
->num_rx_queues
)
2151 if (dev
->reg_state
== NETREG_REGISTERED
) {
2154 rc
= net_rx_queue_update_kobjects(dev
, dev
->real_num_rx_queues
,
2160 dev
->real_num_rx_queues
= rxq
;
2163 EXPORT_SYMBOL(netif_set_real_num_rx_queues
);
2167 * netif_get_num_default_rss_queues - default number of RSS queues
2169 * This routine should set an upper limit on the number of RSS queues
2170 * used by default by multiqueue devices.
2172 int netif_get_num_default_rss_queues(void)
2174 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES
, num_online_cpus());
2176 EXPORT_SYMBOL(netif_get_num_default_rss_queues
);
2178 static inline void __netif_reschedule(struct Qdisc
*q
)
2180 struct softnet_data
*sd
;
2181 unsigned long flags
;
2183 local_irq_save(flags
);
2184 sd
= this_cpu_ptr(&softnet_data
);
2185 q
->next_sched
= NULL
;
2186 *sd
->output_queue_tailp
= q
;
2187 sd
->output_queue_tailp
= &q
->next_sched
;
2188 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
2189 local_irq_restore(flags
);
2192 void __netif_schedule(struct Qdisc
*q
)
2194 if (!test_and_set_bit(__QDISC_STATE_SCHED
, &q
->state
))
2195 __netif_reschedule(q
);
2197 EXPORT_SYMBOL(__netif_schedule
);
2199 struct dev_kfree_skb_cb
{
2200 enum skb_free_reason reason
;
2203 static struct dev_kfree_skb_cb
*get_kfree_skb_cb(const struct sk_buff
*skb
)
2205 return (struct dev_kfree_skb_cb
*)skb
->cb
;
2208 void netif_schedule_queue(struct netdev_queue
*txq
)
2211 if (!(txq
->state
& QUEUE_STATE_ANY_XOFF
)) {
2212 struct Qdisc
*q
= rcu_dereference(txq
->qdisc
);
2214 __netif_schedule(q
);
2218 EXPORT_SYMBOL(netif_schedule_queue
);
2221 * netif_wake_subqueue - allow sending packets on subqueue
2222 * @dev: network device
2223 * @queue_index: sub queue index
2225 * Resume individual transmit queue of a device with multiple transmit queues.
2227 void netif_wake_subqueue(struct net_device
*dev
, u16 queue_index
)
2229 struct netdev_queue
*txq
= netdev_get_tx_queue(dev
, queue_index
);
2231 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF
, &txq
->state
)) {
2235 q
= rcu_dereference(txq
->qdisc
);
2236 __netif_schedule(q
);
2240 EXPORT_SYMBOL(netif_wake_subqueue
);
2242 void netif_tx_wake_queue(struct netdev_queue
*dev_queue
)
2244 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF
, &dev_queue
->state
)) {
2248 q
= rcu_dereference(dev_queue
->qdisc
);
2249 __netif_schedule(q
);
2253 EXPORT_SYMBOL(netif_tx_wake_queue
);
2255 void __dev_kfree_skb_irq(struct sk_buff
*skb
, enum skb_free_reason reason
)
2257 unsigned long flags
;
2259 if (likely(atomic_read(&skb
->users
) == 1)) {
2261 atomic_set(&skb
->users
, 0);
2262 } else if (likely(!atomic_dec_and_test(&skb
->users
))) {
2265 get_kfree_skb_cb(skb
)->reason
= reason
;
2266 local_irq_save(flags
);
2267 skb
->next
= __this_cpu_read(softnet_data
.completion_queue
);
2268 __this_cpu_write(softnet_data
.completion_queue
, skb
);
2269 raise_softirq_irqoff(NET_TX_SOFTIRQ
);
2270 local_irq_restore(flags
);
2272 EXPORT_SYMBOL(__dev_kfree_skb_irq
);
2274 void __dev_kfree_skb_any(struct sk_buff
*skb
, enum skb_free_reason reason
)
2276 if (in_irq() || irqs_disabled())
2277 __dev_kfree_skb_irq(skb
, reason
);
2281 EXPORT_SYMBOL(__dev_kfree_skb_any
);
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart queues if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);
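/*
 * Illustrative sketch (not part of this file): drivers typically pair
 * these helpers in their suspend/resume callbacks.  "my_suspend",
 * "my_resume" and "my_netdev" are hypothetical.
 *
 *	static int my_suspend(struct device *d)
 *	{
 *		netif_device_detach(my_netdev);	// stop all TX queues
 *		// ...power down hardware...
 *		return 0;
 *	}
 *
 *	static int my_resume(struct device *d)
 *	{
 *		// ...power up hardware...
 *		netif_device_attach(my_netdev);	// wake queues, rearm watchdog
 *		return 0;
 *	}
 */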
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}
/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);
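/*
 * Illustrative sketch (not part of this file): a driver whose hardware
 * cannot checksum a given protocol would typically fall back to
 * skb_checksum_help() in its ndo_start_xmit() before handing the frame
 * to the NIC.  "my_hw_can_csum" is a hypothetical driver predicate.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL && !my_hw_can_csum(skb)) {
 *		if (skb_checksum_help(skb))
 *			goto drop;	// could not complete in software
 *	}
 */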
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}
/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);

/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}
/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		skb_warn_bad_offload(skb);

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
EXPORT_SYMBOL(__skb_gso_segment);
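/*
 * Illustrative sketch (not part of this file): callers that must do
 * software GSO walk the returned segment list one skb at a time.
 * skb_gso_segment() is the tx_path wrapper around __skb_gso_segment().
 *
 *	struct sk_buff *segs = skb_gso_segment(skb, features);
 *
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);	// original is replaced by the list
 *		skb = segs;
 *	}
 */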
/* Take action when hardware reception checksum errors are detected. */
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;

	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));

			if (!pdev->dma_mask ||
			    addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}
/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~NETIF_F_ALL_CSUM;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;
	u16 gso_segs = skb_shinfo(skb)->gso_segs;
	__be16 protocol = skb->protocol;

	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
		features &= ~NETIF_F_GSO_MASK;

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (!skb_vlan_tag_present(skb)) {
		if (unlikely(protocol == htons(ETH_P_8021Q) ||
			     protocol == htons(ETH_P_8021AD))) {
			struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

			protocol = veh->h_vlan_encapsulated_proto;
		} else {
			goto finalize;
		}
	}

	features = netdev_intersect_features(features,
					     dev->vlan_features |
					     NETIF_F_HW_VLAN_CTAG_TX |
					     NETIF_F_HW_VLAN_STAG_TX);

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
		features = netdev_intersect_features(features,
						     NETIF_F_SG |
						     NETIF_F_HIGHDMA |
						     NETIF_F_FRAGLIST |
						     NETIF_F_GEN_CSUM |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

finalize:
	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}
static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (!(features & NETIF_F_ALL_CSUM) &&
			    skb_checksum_help(skb))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment.  If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}
static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC___STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}
#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 *	dev_loopback_xmit - loop back @skb
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);
/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      This method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.
	 *
	 * Check this and take the lock.  It is not prone to deadlocks:
	 * either take the noqueue path, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev);
			if (!skb)
				goto drop;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

drop:
	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);
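/*
 * Illustrative sketch (not part of this file): a kernel user that builds
 * its own frame sets skb->dev (and optionally skb->priority) and then
 * hands the buffer to dev_queue_xmit(); the skb is consumed regardless
 * of the return value.  "my_build_frame" and "my_stats" are hypothetical.
 *
 *	struct sk_buff *skb = my_build_frame(dev);
 *
 *	if (skb) {
 *		skb->dev = dev;
 *		if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS)
 *			my_stats.tx_errors++;	// skb already freed
 *	}
 */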
/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}
/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	const struct rps_sock_flow_table *sock_flow_table;
	struct netdev_rx_queue *rxqueue = dev->_rx;
	struct rps_dev_flow_table *flow_table;
	struct rps_map *map;
	int cpu = -1;
	u32 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);

		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue += index;
	}

	/* Avoid computing hash if RFS/RPS is not active for this rxqueue */

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	map = rcu_dereference(rxqueue->rps_map);
	if (!flow_table && !map)
		goto done;

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		struct rps_dev_flow *rflow;
		u32 next_cpu;
		u32 ident;

		/* First check into global flow table if there is a match */
		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
		if ((ident ^ hash) & ~rps_cpu_mask)
			goto try_rps;

		next_cpu = ident & rps_cpu_mask;

		/* OK, now we know there is a match,
		 * we can look at the local (per receive queue) flow table
		 */
		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

try_rps:

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}
#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}
#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;

		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 */
int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);
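/*
 * Illustrative sketch (not part of this file): non-NAPI drivers call
 * netif_rx() from their RX interrupt handler, while code running in
 * process context uses netif_rx_ni() so that the raised softirq gets a
 * chance to run.  "my_alloc_rx_skb" is a hypothetical driver helper.
 *
 *	skb = my_alloc_rx_skb(ring);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	if (in_interrupt())
 *		netif_rx(skb);
 *	else
 *		netif_rx_ni(skb);
 */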
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;

			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_atomic();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_atomic();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}
#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is?  Otherwise we pay some useless instructions
 * (a compare and 2 stores) right now if we don't have it on
 * but do have CONFIG_NET_CLS_ACT.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rcu_dereference(rxq->qdisc);
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
		return skb;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
#endif
/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
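/*
 * Illustrative sketch (not part of this file): a bridging-style module
 * claims a port device by registering an rx_handler under RTNL; the
 * handler can steal, consume or pass each skb.  "my_port" and
 * "my_handle_frame" are hypothetical.
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct my_port *p =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		// ...inspect or redirect *pskb...
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(port_dev, my_handle_frame, my_port);
 *	rtnl_unlock();
 */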
/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		if (skb_vlan_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}
static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}
static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}
/* napi->gro_list contains packets ordered by age.
 * youngest packets at the head of it.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);
static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}
static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}
static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}
static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->udp_mark = 0;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}
struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
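/*
 * Illustrative sketch (not part of this file): NAPI drivers normally feed
 * received frames through napi_gro_receive() from their ->poll() callback
 * instead of calling netif_receive_skb() directly, so that TCP flows can
 * be coalesced.  "my_ring_next_rx" is a hypothetical driver helper.
 *
 *	while (work_done < budget && (skb = my_ring_next_rx(ring))) {
 *		skb->protocol = eth_type_trans(skb, napi->dev);
 *		napi_gro_receive(napi, skb);
 *		work_done++;
 *	}
 */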
static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);
static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}
/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				smp_call_function_single_async(remsd->cpu,
							       &remsd->csd);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}
static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

	/* Check if we have pending ipi, its better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = weight_p;
	local_irq_disable();
	while (1) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we dont need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			rps_unlock(sd);

			break;
		}

		skb_queue_splice_tail_init(&sd->input_pkt_queue,
					   &sd->process_queue);
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}
/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));

	list_del_init(&n->poll_list);
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (likely(list_empty(&n->poll_list))) {
		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
	} else {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		__napi_complete(n);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(napi_complete_done);
/* must be called under rcu_read_lock(), as we don't take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
EXPORT_SYMBOL_GPL(napi_by_id);

void napi_hash_add(struct napi_struct *napi)
{
	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {

		spin_lock(&napi_hash_lock);

		/* 0 is not a valid id, we also skip an id that is taken
		 * we expect both events to be extremely rare
		 */
		napi->napi_id = 0;
		while (!napi->napi_id) {
			napi->napi_id = ++napi_gen_id;
			if (napi_by_id(napi->napi_id))
				napi->napi_id = 0;
		}

		hlist_add_head_rcu(&napi->napi_hash_node,
			&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

		spin_unlock(&napi_hash_lock);
	}
}
EXPORT_SYMBOL_GPL(napi_hash_add);

/* Warning : caller is responsible to make sure rcu grace period
 * is respected before freeing memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi)
{
	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
		hlist_del_rcu(&napi->napi_hash_node);

	spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_del);
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);
	if (napi->gro_list)
		napi_schedule(napi);

	return HRTIMER_NORESTART;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
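/*
 * Illustrative sketch (not part of this file): the usual NAPI life cycle.
 * A driver registers a poll callback at probe time, schedules it from the
 * RX interrupt, and completes it once the ring is drained.  "my_poll",
 * "adapter", "my_disable_rx_irq" and "my_enable_rx_irq" are hypothetical;
 * only the core API calls are real.
 *
 *	netif_napi_add(dev, &adapter->napi, my_poll, NAPI_POLL_WEIGHT);
 *
 *	// in the RX interrupt handler:
 *	if (napi_schedule_prep(&adapter->napi)) {
 *		my_disable_rx_irq(adapter);
 *		__napi_schedule(&adapter->napi);
 *	}
 *
 *	// in my_poll(), once fewer than "budget" packets were processed:
 *	napi_complete_done(napi, work_done);
 *	my_enable_rx_irq(adapter);
 */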
void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

void netif_napi_del(struct napi_struct *napi)
{
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi().  Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call.  Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight.  In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				return;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies since which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
}
struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}
/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->all_adj_list.upper);
}
/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);
/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);

/**
 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->all_adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
4838 * netdev_lower_get_next_private - Get the next ->private from the
4839 * lower neighbour list
4841 * @iter: list_head ** of the current position
4843 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold either the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
4848 void *netdev_lower_get_next_private(struct net_device
*dev
,
4849 struct list_head
**iter
)
4851 struct netdev_adjacent
*lower
;
4853 lower
= list_entry(*iter
, struct netdev_adjacent
, list
);
4855 if (&lower
->list
== &dev
->adj_list
.lower
)
4858 *iter
= lower
->list
.next
;
4860 return lower
->private;
4862 EXPORT_SYMBOL(netdev_lower_get_next_private
);
4865 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4866 * lower neighbour list, RCU
4869 * @iter: list_head ** of the current position
4871 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4872 * list, starting from iter position. The caller must hold RCU read lock.
4874 void *netdev_lower_get_next_private_rcu(struct net_device
*dev
,
4875 struct list_head
**iter
)
4877 struct netdev_adjacent
*lower
;
4879 WARN_ON_ONCE(!rcu_read_lock_held());
4881 lower
= list_entry_rcu((*iter
)->next
, struct netdev_adjacent
, list
);
4883 if (&lower
->list
== &dev
->adj_list
.lower
)
4886 *iter
= &lower
->list
;
4888 return lower
->private;
4890 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu
);
4893 * netdev_lower_get_next - Get the next device from the lower neighbour
4896 * @iter: list_head ** of the current position
4898 * Gets the next netdev_adjacent from the dev's lower neighbour
4899 * list, starting from iter position. The caller must hold RTNL lock or
4900 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
4903 void *netdev_lower_get_next(struct net_device
*dev
, struct list_head
**iter
)
4905 struct netdev_adjacent
*lower
;
4907 lower
= list_entry((*iter
)->next
, struct netdev_adjacent
, list
);
4909 if (&lower
->list
== &dev
->adj_list
.lower
)
4912 *iter
= &lower
->list
;
4916 EXPORT_SYMBOL(netdev_lower_get_next
);
4919 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4920 * lower neighbour list, RCU
4924 * Gets the first netdev_adjacent->private from the dev's lower neighbour
4925 * list. The caller must hold RCU read lock.
4927 void *netdev_lower_get_first_private_rcu(struct net_device
*dev
)
4929 struct netdev_adjacent
*lower
;
4931 lower
= list_first_or_null_rcu(&dev
->adj_list
.lower
,
4932 struct netdev_adjacent
, list
);
4934 return lower
->private;
4937 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu
);
4940 * netdev_master_upper_dev_get_rcu - Get master upper device
4943 * Find a master upper device and return pointer to it or NULL in case
4944 * it's not there. The caller must hold the RCU read lock.
4946 struct net_device
*netdev_master_upper_dev_get_rcu(struct net_device
*dev
)
4948 struct netdev_adjacent
*upper
;
4950 upper
= list_first_or_null_rcu(&dev
->adj_list
.upper
,
4951 struct netdev_adjacent
, list
);
4952 if (upper
&& likely(upper
->master
))
4956 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu
);
4958 static int netdev_adjacent_sysfs_add(struct net_device
*dev
,
4959 struct net_device
*adj_dev
,
4960 struct list_head
*dev_list
)
4962 char linkname
[IFNAMSIZ
+7];
4963 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
4964 "upper_%s" : "lower_%s", adj_dev
->name
);
4965 return sysfs_create_link(&(dev
->dev
.kobj
), &(adj_dev
->dev
.kobj
),
4968 static void netdev_adjacent_sysfs_del(struct net_device
*dev
,
4970 struct list_head
*dev_list
)
4972 char linkname
[IFNAMSIZ
+7];
4973 sprintf(linkname
, dev_list
== &dev
->adj_list
.upper
?
4974 "upper_%s" : "lower_%s", name
);
4975 sysfs_remove_link(&(dev
->dev
.kobj
), linkname
);
4978 static inline bool netdev_adjacent_is_neigh_list(struct net_device
*dev
,
4979 struct net_device
*adj_dev
,
4980 struct list_head
*dev_list
)
4982 return (dev_list
== &dev
->adj_list
.upper
||
4983 dev_list
== &dev
->adj_list
.lower
) &&
4984 net_eq(dev_net(dev
), dev_net(adj_dev
));
4987 static int __netdev_adjacent_dev_insert(struct net_device
*dev
,
4988 struct net_device
*adj_dev
,
4989 struct list_head
*dev_list
,
4990 void *private, bool master
)
4992 struct netdev_adjacent
*adj
;
4995 adj
= __netdev_find_adj(dev
, adj_dev
, dev_list
);
5002 adj
= kmalloc(sizeof(*adj
), GFP_KERNEL
);
5007 adj
->master
= master
;
5009 adj
->private = private;
5012 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5013 adj_dev
->name
, dev
->name
, adj_dev
->name
);
5015 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
)) {
5016 ret
= netdev_adjacent_sysfs_add(dev
, adj_dev
, dev_list
);
5021 /* Ensure that master link is always the first item in list. */
5023 ret
= sysfs_create_link(&(dev
->dev
.kobj
),
5024 &(adj_dev
->dev
.kobj
), "master");
5026 goto remove_symlinks
;
5028 list_add_rcu(&adj
->list
, dev_list
);
5030 list_add_tail_rcu(&adj
->list
, dev_list
);
5036 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
5037 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
5045 static void __netdev_adjacent_dev_remove(struct net_device
*dev
,
5046 struct net_device
*adj_dev
,
5047 struct list_head
*dev_list
)
5049 struct netdev_adjacent
*adj
;
5051 adj
= __netdev_find_adj(dev
, adj_dev
, dev_list
);
5054 pr_err("tried to remove device %s from %s\n",
5055 dev
->name
, adj_dev
->name
);
5059 if (adj
->ref_nr
> 1) {
5060 pr_debug("%s to %s ref_nr-- = %d\n", dev
->name
, adj_dev
->name
,
5067 sysfs_remove_link(&(dev
->dev
.kobj
), "master");
5069 if (netdev_adjacent_is_neigh_list(dev
, adj_dev
, dev_list
))
5070 netdev_adjacent_sysfs_del(dev
, adj_dev
->name
, dev_list
);
5072 list_del_rcu(&adj
->list
);
5073 pr_debug("dev_put for %s, because link removed from %s to %s\n",
5074 adj_dev
->name
, dev
->name
, adj_dev
->name
);
5076 kfree_rcu(adj
, rcu
);
5079 static int __netdev_adjacent_dev_link_lists(struct net_device
*dev
,
5080 struct net_device
*upper_dev
,
5081 struct list_head
*up_list
,
5082 struct list_head
*down_list
,
5083 void *private, bool master
)
5087 ret
= __netdev_adjacent_dev_insert(dev
, upper_dev
, up_list
, private,
5092 ret
= __netdev_adjacent_dev_insert(upper_dev
, dev
, down_list
, private,
5095 __netdev_adjacent_dev_remove(dev
, upper_dev
, up_list
);
5102 static int __netdev_adjacent_dev_link(struct net_device
*dev
,
5103 struct net_device
*upper_dev
)
5105 return __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
5106 &dev
->all_adj_list
.upper
,
5107 &upper_dev
->all_adj_list
.lower
,
5111 static void __netdev_adjacent_dev_unlink_lists(struct net_device
*dev
,
5112 struct net_device
*upper_dev
,
5113 struct list_head
*up_list
,
5114 struct list_head
*down_list
)
5116 __netdev_adjacent_dev_remove(dev
, upper_dev
, up_list
);
5117 __netdev_adjacent_dev_remove(upper_dev
, dev
, down_list
);
5120 static void __netdev_adjacent_dev_unlink(struct net_device
*dev
,
5121 struct net_device
*upper_dev
)
5123 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
,
5124 &dev
->all_adj_list
.upper
,
5125 &upper_dev
->all_adj_list
.lower
);
5128 static int __netdev_adjacent_dev_link_neighbour(struct net_device
*dev
,
5129 struct net_device
*upper_dev
,
5130 void *private, bool master
)
5132 int ret
= __netdev_adjacent_dev_link(dev
, upper_dev
);
5137 ret
= __netdev_adjacent_dev_link_lists(dev
, upper_dev
,
5138 &dev
->adj_list
.upper
,
5139 &upper_dev
->adj_list
.lower
,
5142 __netdev_adjacent_dev_unlink(dev
, upper_dev
);
5149 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device
*dev
,
5150 struct net_device
*upper_dev
)
5152 __netdev_adjacent_dev_unlink(dev
, upper_dev
);
5153 __netdev_adjacent_dev_unlink_lists(dev
, upper_dev
,
5154 &dev
->adj_list
.upper
,
5155 &upper_dev
->adj_list
.lower
);
5158 static int __netdev_upper_dev_link(struct net_device
*dev
,
5159 struct net_device
*upper_dev
, bool master
,
5162 struct netdev_adjacent
*i
, *j
, *to_i
, *to_j
;
5167 if (dev
== upper_dev
)
5170 /* To prevent loops, check if dev is not upper device to upper_dev. */
5171 if (__netdev_find_adj(upper_dev
, dev
, &upper_dev
->all_adj_list
.upper
))
5174 if (__netdev_find_adj(dev
, upper_dev
, &dev
->all_adj_list
.upper
))
5177 if (master
&& netdev_master_upper_dev_get(dev
))
5180 ret
= __netdev_adjacent_dev_link_neighbour(dev
, upper_dev
, private,
5185 /* Now that we linked these devs, make all the upper_dev's
	 * all_adj_list.upper visible to every dev's all_adj_list.lower and vice
	 * versa, and don't forget the devices themselves. All of these
5188 * links are non-neighbours.
5190 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5191 list_for_each_entry(j
, &upper_dev
->all_adj_list
.upper
, list
) {
5192 pr_debug("Interlinking %s with %s, non-neighbour\n",
5193 i
->dev
->name
, j
->dev
->name
);
5194 ret
= __netdev_adjacent_dev_link(i
->dev
, j
->dev
);
5200 /* add dev to every upper_dev's upper device */
5201 list_for_each_entry(i
, &upper_dev
->all_adj_list
.upper
, list
) {
5202 pr_debug("linking %s's upper device %s with %s\n",
5203 upper_dev
->name
, i
->dev
->name
, dev
->name
);
5204 ret
= __netdev_adjacent_dev_link(dev
, i
->dev
);
5206 goto rollback_upper_mesh
;
5209 /* add upper_dev to every dev's lower device */
5210 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5211 pr_debug("linking %s's lower device %s with %s\n", dev
->name
,
5212 i
->dev
->name
, upper_dev
->name
);
5213 ret
= __netdev_adjacent_dev_link(i
->dev
, upper_dev
);
5215 goto rollback_lower_mesh
;
5218 call_netdevice_notifiers(NETDEV_CHANGEUPPER
, dev
);
5221 rollback_lower_mesh
:
5223 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5226 __netdev_adjacent_dev_unlink(i
->dev
, upper_dev
);
5231 rollback_upper_mesh
:
5233 list_for_each_entry(i
, &upper_dev
->all_adj_list
.upper
, list
) {
5236 __netdev_adjacent_dev_unlink(dev
, i
->dev
);
5244 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
) {
5245 list_for_each_entry(j
, &upper_dev
->all_adj_list
.upper
, list
) {
5246 if (i
== to_i
&& j
== to_j
)
5248 __netdev_adjacent_dev_unlink(i
->dev
, j
->dev
);
5254 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
5260 * netdev_upper_dev_link - Add a link to the upper device
5262 * @upper_dev: new upper device
5264 * Adds a link to device which is upper to this one. The caller must hold
5265 * the RTNL lock. On a failure a negative errno code is returned.
5266 * On success the reference counts are adjusted and the function
5269 int netdev_upper_dev_link(struct net_device
*dev
,
5270 struct net_device
*upper_dev
)
5272 return __netdev_upper_dev_link(dev
, upper_dev
, false, NULL
);
5274 EXPORT_SYMBOL(netdev_upper_dev_link
);
5277 * netdev_master_upper_dev_link - Add a master link to the upper device
5279 * @upper_dev: new upper device
5281 * Adds a link to device which is upper to this one. In this case, only
5282 * one master upper device can be linked, although other non-master devices
5283 * might be linked as well. The caller must hold the RTNL lock.
5284 * On a failure a negative errno code is returned. On success the reference
5285 * counts are adjusted and the function returns zero.
5287 int netdev_master_upper_dev_link(struct net_device
*dev
,
5288 struct net_device
*upper_dev
)
5290 return __netdev_upper_dev_link(dev
, upper_dev
, true, NULL
);
5292 EXPORT_SYMBOL(netdev_master_upper_dev_link
);
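
/*
 * Editorial example, not part of the original source: the usual enslave /
 * release pattern built on the linking helpers above, as a bonding- or
 * bridge-like driver might use them. Function names are hypothetical; the
 * caller is assumed to hold RTNL, and the slave is the lower device while
 * the master is the upper one.
 */
static int example_enslave(struct net_device *master, struct net_device *slave)
{
	int err;

	ASSERT_RTNL();

	err = netdev_master_upper_dev_link(slave, master);
	if (err)
		return err;

	/* ... device specific enslave setup would go here ... */
	return 0;
}

static void example_release(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(slave, master);
}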
5294 int netdev_master_upper_dev_link_private(struct net_device
*dev
,
5295 struct net_device
*upper_dev
,
5298 return __netdev_upper_dev_link(dev
, upper_dev
, true, private);
5300 EXPORT_SYMBOL(netdev_master_upper_dev_link_private
);
5303 * netdev_upper_dev_unlink - Removes a link to upper device
5305 * @upper_dev: new upper device
5307 * Removes a link to device which is upper to this one. The caller must hold
5310 void netdev_upper_dev_unlink(struct net_device
*dev
,
5311 struct net_device
*upper_dev
)
5313 struct netdev_adjacent
*i
, *j
;
5316 __netdev_adjacent_dev_unlink_neighbour(dev
, upper_dev
);
5318 /* Here is the tricky part. We must remove all dev's lower
5319 * devices from all upper_dev's upper devices and vice
5320 * versa, to maintain the graph relationship.
5322 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
)
5323 list_for_each_entry(j
, &upper_dev
->all_adj_list
.upper
, list
)
5324 __netdev_adjacent_dev_unlink(i
->dev
, j
->dev
);
	/* also remove the devices themselves from the lower/upper device
5329 list_for_each_entry(i
, &dev
->all_adj_list
.lower
, list
)
5330 __netdev_adjacent_dev_unlink(i
->dev
, upper_dev
);
5332 list_for_each_entry(i
, &upper_dev
->all_adj_list
.upper
, list
)
5333 __netdev_adjacent_dev_unlink(dev
, i
->dev
);
5335 call_netdevice_notifiers(NETDEV_CHANGEUPPER
, dev
);
5337 EXPORT_SYMBOL(netdev_upper_dev_unlink
);
5340 * netdev_bonding_info_change - Dispatch event about slave change
5342 * @bonding_info: info to dispatch
5344 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5345 * The caller must hold the RTNL lock.
5347 void netdev_bonding_info_change(struct net_device
*dev
,
5348 struct netdev_bonding_info
*bonding_info
)
5350 struct netdev_notifier_bonding_info info
;
5352 memcpy(&info
.bonding_info
, bonding_info
,
5353 sizeof(struct netdev_bonding_info
));
5354 call_netdevice_notifiers_info(NETDEV_BONDING_INFO
, dev
,
5357 EXPORT_SYMBOL(netdev_bonding_info_change
);
5359 static void netdev_adjacent_add_links(struct net_device
*dev
)
5361 struct netdev_adjacent
*iter
;
5363 struct net
*net
= dev_net(dev
);
5365 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
5366 if (!net_eq(net
,dev_net(iter
->dev
)))
5368 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5369 &iter
->dev
->adj_list
.lower
);
5370 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
5371 &dev
->adj_list
.upper
);
5374 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
5375 if (!net_eq(net
,dev_net(iter
->dev
)))
5377 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5378 &iter
->dev
->adj_list
.upper
);
5379 netdev_adjacent_sysfs_add(dev
, iter
->dev
,
5380 &dev
->adj_list
.lower
);
5384 static void netdev_adjacent_del_links(struct net_device
*dev
)
5386 struct netdev_adjacent
*iter
;
5388 struct net
*net
= dev_net(dev
);
5390 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
5391 if (!net_eq(net
,dev_net(iter
->dev
)))
5393 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
5394 &iter
->dev
->adj_list
.lower
);
5395 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
5396 &dev
->adj_list
.upper
);
5399 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
5400 if (!net_eq(net
,dev_net(iter
->dev
)))
5402 netdev_adjacent_sysfs_del(iter
->dev
, dev
->name
,
5403 &iter
->dev
->adj_list
.upper
);
5404 netdev_adjacent_sysfs_del(dev
, iter
->dev
->name
,
5405 &dev
->adj_list
.lower
);
5409 void netdev_adjacent_rename_links(struct net_device
*dev
, char *oldname
)
5411 struct netdev_adjacent
*iter
;
5413 struct net
*net
= dev_net(dev
);
5415 list_for_each_entry(iter
, &dev
->adj_list
.upper
, list
) {
5416 if (!net_eq(net
,dev_net(iter
->dev
)))
5418 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
5419 &iter
->dev
->adj_list
.lower
);
5420 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5421 &iter
->dev
->adj_list
.lower
);
5424 list_for_each_entry(iter
, &dev
->adj_list
.lower
, list
) {
5425 if (!net_eq(net
,dev_net(iter
->dev
)))
5427 netdev_adjacent_sysfs_del(iter
->dev
, oldname
,
5428 &iter
->dev
->adj_list
.upper
);
5429 netdev_adjacent_sysfs_add(iter
->dev
, dev
,
5430 &iter
->dev
->adj_list
.upper
);
5434 void *netdev_lower_dev_get_private(struct net_device
*dev
,
5435 struct net_device
*lower_dev
)
5437 struct netdev_adjacent
*lower
;
5441 lower
= __netdev_find_adj(dev
, lower_dev
, &dev
->adj_list
.lower
);
5445 return lower
->private;
5447 EXPORT_SYMBOL(netdev_lower_dev_get_private
);
5450 int dev_get_nest_level(struct net_device
*dev
,
5451 bool (*type_check
)(struct net_device
*dev
))
5453 struct net_device
*lower
= NULL
;
5454 struct list_head
*iter
;
5460 netdev_for_each_lower_dev(dev
, lower
, iter
) {
5461 nest
= dev_get_nest_level(lower
, type_check
);
5462 if (max_nest
< nest
)
5466 if (type_check(dev
))
5471 EXPORT_SYMBOL(dev_get_nest_level
);
5473 static void dev_change_rx_flags(struct net_device
*dev
, int flags
)
5475 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5477 if (ops
->ndo_change_rx_flags
)
5478 ops
->ndo_change_rx_flags(dev
, flags
);
5481 static int __dev_set_promiscuity(struct net_device
*dev
, int inc
, bool notify
)
5483 unsigned int old_flags
= dev
->flags
;
5489 dev
->flags
|= IFF_PROMISC
;
5490 dev
->promiscuity
+= inc
;
5491 if (dev
->promiscuity
== 0) {
5494 * If inc causes overflow, untouch promisc and return error.
5497 dev
->flags
&= ~IFF_PROMISC
;
5499 dev
->promiscuity
-= inc
;
5500 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5505 if (dev
->flags
!= old_flags
) {
5506 pr_info("device %s %s promiscuous mode\n",
5508 dev
->flags
& IFF_PROMISC
? "entered" : "left");
5509 if (audit_enabled
) {
5510 current_uid_gid(&uid
, &gid
);
5511 audit_log(current
->audit_context
, GFP_ATOMIC
,
5512 AUDIT_ANOM_PROMISCUOUS
,
5513 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5514 dev
->name
, (dev
->flags
& IFF_PROMISC
),
5515 (old_flags
& IFF_PROMISC
),
5516 from_kuid(&init_user_ns
, audit_get_loginuid(current
)),
5517 from_kuid(&init_user_ns
, uid
),
5518 from_kgid(&init_user_ns
, gid
),
5519 audit_get_sessionid(current
));
5522 dev_change_rx_flags(dev
, IFF_PROMISC
);
5525 __dev_notify_flags(dev
, old_flags
, IFF_PROMISC
);
/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
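
/*
 * Editorial example, not part of the original source: promiscuity is a
 * counter, so every +1 must eventually be paired with a -1, typically when a
 * consumer such as a packet tap attaches and detaches. The helper names are
 * illustrative; the calls are assumed to run under RTNL.
 */
static int example_tap_attach(struct net_device *dev)
{
	return dev_set_promiscuity(dev, 1);	/* count 0 -> 1 turns IFF_PROMISC on */
}

static void example_tap_detach(struct net_device *dev)
{
	dev_set_promiscuity(dev, -1);		/* count 1 -> 0 turns it back off */
}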
5554 static int __dev_set_allmulti(struct net_device
*dev
, int inc
, bool notify
)
5556 unsigned int old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
5560 dev
->flags
|= IFF_ALLMULTI
;
5561 dev
->allmulti
+= inc
;
5562 if (dev
->allmulti
== 0) {
5565 * If inc causes overflow, untouch allmulti and return error.
5568 dev
->flags
&= ~IFF_ALLMULTI
;
5570 dev
->allmulti
-= inc
;
5571 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5576 if (dev
->flags
^ old_flags
) {
5577 dev_change_rx_flags(dev
, IFF_ALLMULTI
);
5578 dev_set_rx_mode(dev
);
5580 __dev_notify_flags(dev
, old_flags
,
5581 dev
->gflags
^ old_gflags
);
/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);
5606 * Upload unicast and multicast address lists to device and
5607 * configure RX filtering. When the device doesn't support unicast
5608 * filtering it is put in promiscuous mode while unicast addresses
5611 void __dev_set_rx_mode(struct net_device
*dev
)
5613 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5615 /* dev_open will call this function so the list will stay sane. */
5616 if (!(dev
->flags
&IFF_UP
))
5619 if (!netif_device_present(dev
))
5622 if (!(dev
->priv_flags
& IFF_UNICAST_FLT
)) {
5623 /* Unicast addresses changes may only happen under the rtnl,
5624 * therefore calling __dev_set_promiscuity here is safe.
5626 if (!netdev_uc_empty(dev
) && !dev
->uc_promisc
) {
5627 __dev_set_promiscuity(dev
, 1, false);
5628 dev
->uc_promisc
= true;
5629 } else if (netdev_uc_empty(dev
) && dev
->uc_promisc
) {
5630 __dev_set_promiscuity(dev
, -1, false);
5631 dev
->uc_promisc
= false;
5635 if (ops
->ndo_set_rx_mode
)
5636 ops
->ndo_set_rx_mode(dev
);
5639 void dev_set_rx_mode(struct net_device
*dev
)
5641 netif_addr_lock_bh(dev
);
5642 __dev_set_rx_mode(dev
);
5643 netif_addr_unlock_bh(dev
);
/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);
5677 int __dev_change_flags(struct net_device
*dev
, unsigned int flags
)
5679 unsigned int old_flags
= dev
->flags
;
5685 * Set the flags on our device.
5688 dev
->flags
= (flags
& (IFF_DEBUG
| IFF_NOTRAILERS
| IFF_NOARP
|
5689 IFF_DYNAMIC
| IFF_MULTICAST
| IFF_PORTSEL
|
5691 (dev
->flags
& (IFF_UP
| IFF_VOLATILE
| IFF_PROMISC
|
5695 * Load in the correct multicast list now the flags have changed.
5698 if ((old_flags
^ flags
) & IFF_MULTICAST
)
5699 dev_change_rx_flags(dev
, IFF_MULTICAST
);
5701 dev_set_rx_mode(dev
);
5704 * Have we downed the interface. We handle IFF_UP ourselves
5705 * according to user attempts to set it, rather than blindly
5710 if ((old_flags
^ flags
) & IFF_UP
)
5711 ret
= ((old_flags
& IFF_UP
) ? __dev_close
: __dev_open
)(dev
);
5713 if ((flags
^ dev
->gflags
) & IFF_PROMISC
) {
5714 int inc
= (flags
& IFF_PROMISC
) ? 1 : -1;
5715 unsigned int old_flags
= dev
->flags
;
5717 dev
->gflags
^= IFF_PROMISC
;
5719 if (__dev_set_promiscuity(dev
, inc
, false) >= 0)
5720 if (dev
->flags
!= old_flags
)
5721 dev_set_rx_mode(dev
);
5724 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5725 is important. Some (broken) drivers set IFF_PROMISC, when
5726 IFF_ALLMULTI is requested not asking us and not reporting.
5728 if ((flags
^ dev
->gflags
) & IFF_ALLMULTI
) {
5729 int inc
= (flags
& IFF_ALLMULTI
) ? 1 : -1;
5731 dev
->gflags
^= IFF_ALLMULTI
;
5732 __dev_set_allmulti(dev
, inc
, false);
5738 void __dev_notify_flags(struct net_device
*dev
, unsigned int old_flags
,
5739 unsigned int gchanges
)
5741 unsigned int changes
= dev
->flags
^ old_flags
;
5744 rtmsg_ifinfo(RTM_NEWLINK
, dev
, gchanges
, GFP_ATOMIC
);
5746 if (changes
& IFF_UP
) {
5747 if (dev
->flags
& IFF_UP
)
5748 call_netdevice_notifiers(NETDEV_UP
, dev
);
5750 call_netdevice_notifiers(NETDEV_DOWN
, dev
);
5753 if (dev
->flags
& IFF_UP
&&
5754 (changes
& ~(IFF_UP
| IFF_PROMISC
| IFF_ALLMULTI
| IFF_VOLATILE
))) {
5755 struct netdev_notifier_change_info change_info
;
5757 change_info
.flags_changed
= changes
;
5758 call_netdevice_notifiers_info(NETDEV_CHANGE
, dev
,
5764 * dev_change_flags - change device settings
5766 * @flags: device state flags
5768 * Change settings on device based state flags. The flags are
5769 * in the userspace exported format.
5771 int dev_change_flags(struct net_device
*dev
, unsigned int flags
)
5774 unsigned int changes
, old_flags
= dev
->flags
, old_gflags
= dev
->gflags
;
5776 ret
= __dev_change_flags(dev
, flags
);
5780 changes
= (old_flags
^ dev
->flags
) | (old_gflags
^ dev
->gflags
);
5781 __dev_notify_flags(dev
, old_flags
, changes
);
5784 EXPORT_SYMBOL(dev_change_flags
);
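
/*
 * Editorial example, not part of the original source: bringing an interface
 * administratively up or down through the flag interface above, roughly as
 * the ioctl and rtnetlink paths do. The helper name is hypothetical; the
 * caller is assumed to hold RTNL.
 */
static int example_set_admin_state(struct net_device *dev, bool up)
{
	unsigned int flags = dev_get_flags(dev);

	if (up)
		flags |= IFF_UP;
	else
		flags &= ~IFF_UP;

	return dev_change_flags(dev, flags);
}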
static int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
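
/*
 * Editorial example, not part of the original source: a driver stacked on a
 * lower device might clamp its own MTU to the lower device's, leaving room
 * for its encapsulation header. The helper name and the 8-byte header size
 * are illustrative assumptions; the caller is assumed to hold RTNL.
 */
static int example_sync_mtu(struct net_device *dev, struct net_device *lower)
{
	int mtu = lower->mtu - 8;	/* room for a hypothetical 8-byte encap header */

	if (dev->mtu <= mtu)
		return 0;

	return dev_set_mtu(dev, mtu);
}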
/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);
/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
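
/*
 * Editorial example, not part of the original source: programming a new
 * hardware address goes through a struct sockaddr whose family must match
 * dev->type. Illustrative only; "new_addr" is assumed to hold dev->addr_len
 * valid bytes and the caller to hold RTNL.
 */
static int example_set_hwaddr(struct net_device *dev, const u8 *new_addr)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;	/* e.g. ARPHRD_ETHER for Ethernet */
	memcpy(sa.sa_data, new_addr, dev->addr_len);

	return dev_set_mac_address(dev, &sa);
}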
5881 * dev_change_carrier - Change device carrier
5883 * @new_carrier: new value
5885 * Change device carrier
5887 int dev_change_carrier(struct net_device
*dev
, bool new_carrier
)
5889 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5891 if (!ops
->ndo_change_carrier
)
5893 if (!netif_device_present(dev
))
5895 return ops
->ndo_change_carrier(dev
, new_carrier
);
5897 EXPORT_SYMBOL(dev_change_carrier
);
5900 * dev_get_phys_port_id - Get device physical port ID
5904 * Get device physical port ID
5906 int dev_get_phys_port_id(struct net_device
*dev
,
5907 struct netdev_phys_item_id
*ppid
)
5909 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5911 if (!ops
->ndo_get_phys_port_id
)
5913 return ops
->ndo_get_phys_port_id(dev
, ppid
);
5915 EXPORT_SYMBOL(dev_get_phys_port_id
);
5918 * dev_get_phys_port_name - Get device physical port name
5922 * Get device physical port name
5924 int dev_get_phys_port_name(struct net_device
*dev
,
5925 char *name
, size_t len
)
5927 const struct net_device_ops
*ops
= dev
->netdev_ops
;
5929 if (!ops
->ndo_get_phys_port_name
)
5931 return ops
->ndo_get_phys_port_name(dev
, name
, len
);
5933 EXPORT_SYMBOL(dev_get_phys_port_name
);
5936 * dev_new_index - allocate an ifindex
5937 * @net: the applicable net namespace
5939 * Returns a suitable unique value for a new device interface
5940 * number. The caller must hold the rtnl semaphore or the
5941 * dev_base_lock to be sure it remains unique.
5943 static int dev_new_index(struct net
*net
)
5945 int ifindex
= net
->ifindex
;
5949 if (!__dev_get_by_index(net
, ifindex
))
5950 return net
->ifindex
= ifindex
;
5954 /* Delayed registration/unregisteration */
5955 static LIST_HEAD(net_todo_list
);
5956 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq
);
5958 static void net_set_todo(struct net_device
*dev
)
5960 list_add_tail(&dev
->todo_list
, &net_todo_list
);
5961 dev_net(dev
)->dev_unreg_count
++;
5964 static void rollback_registered_many(struct list_head
*head
)
5966 struct net_device
*dev
, *tmp
;
5967 LIST_HEAD(close_head
);
5969 BUG_ON(dev_boot_phase
);
5972 list_for_each_entry_safe(dev
, tmp
, head
, unreg_list
) {
5973 /* Some devices call without registering
5974 * for initialization unwind. Remove those
5975 * devices and proceed with the remaining.
5977 if (dev
->reg_state
== NETREG_UNINITIALIZED
) {
5978 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5982 list_del(&dev
->unreg_list
);
5985 dev
->dismantle
= true;
5986 BUG_ON(dev
->reg_state
!= NETREG_REGISTERED
);
5989 /* If device is running, close it first. */
5990 list_for_each_entry(dev
, head
, unreg_list
)
5991 list_add_tail(&dev
->close_list
, &close_head
);
5992 dev_close_many(&close_head
, true);
5994 list_for_each_entry(dev
, head
, unreg_list
) {
5995 /* And unlink it from device chain. */
5996 unlist_netdevice(dev
);
5998 dev
->reg_state
= NETREG_UNREGISTERING
;
6003 list_for_each_entry(dev
, head
, unreg_list
) {
6004 struct sk_buff
*skb
= NULL
;
6006 /* Shutdown queueing discipline. */
6010 /* Notify protocols, that we are about to destroy
6011 this device. They should clean all the things.
6013 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
6015 if (!dev
->rtnl_link_ops
||
6016 dev
->rtnl_link_state
== RTNL_LINK_INITIALIZED
)
6017 skb
= rtmsg_ifinfo_build_skb(RTM_DELLINK
, dev
, ~0U,
6021 * Flush the unicast and multicast chains
6026 if (dev
->netdev_ops
->ndo_uninit
)
6027 dev
->netdev_ops
->ndo_uninit(dev
);
6030 rtmsg_ifinfo_send(skb
, dev
, GFP_KERNEL
);
6032 /* Notifier chain MUST detach us all upper devices. */
6033 WARN_ON(netdev_has_any_upper_dev(dev
));
6035 /* Remove entries from kobject tree */
6036 netdev_unregister_kobject(dev
);
6038 /* Remove XPS queueing entries */
6039 netif_reset_xps_queues_gt(dev
, 0);
6045 list_for_each_entry(dev
, head
, unreg_list
)
6049 static void rollback_registered(struct net_device
*dev
)
6053 list_add(&dev
->unreg_list
, &single
);
6054 rollback_registered_many(&single
);
6058 static netdev_features_t
netdev_fix_features(struct net_device
*dev
,
6059 netdev_features_t features
)
6061 /* Fix illegal checksum combinations */
6062 if ((features
& NETIF_F_HW_CSUM
) &&
6063 (features
& (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))) {
6064 netdev_warn(dev
, "mixed HW and IP checksum settings.\n");
6065 features
&= ~(NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
);
6068 /* TSO requires that SG is present as well. */
6069 if ((features
& NETIF_F_ALL_TSO
) && !(features
& NETIF_F_SG
)) {
6070 netdev_dbg(dev
, "Dropping TSO features since no SG feature.\n");
6071 features
&= ~NETIF_F_ALL_TSO
;
6074 if ((features
& NETIF_F_TSO
) && !(features
& NETIF_F_HW_CSUM
) &&
6075 !(features
& NETIF_F_IP_CSUM
)) {
6076 netdev_dbg(dev
, "Dropping TSO features since no CSUM feature.\n");
6077 features
&= ~NETIF_F_TSO
;
6078 features
&= ~NETIF_F_TSO_ECN
;
6081 if ((features
& NETIF_F_TSO6
) && !(features
& NETIF_F_HW_CSUM
) &&
6082 !(features
& NETIF_F_IPV6_CSUM
)) {
6083 netdev_dbg(dev
, "Dropping TSO6 features since no CSUM feature.\n");
6084 features
&= ~NETIF_F_TSO6
;
6087 /* TSO ECN requires that TSO is present as well. */
6088 if ((features
& NETIF_F_ALL_TSO
) == NETIF_F_TSO_ECN
)
6089 features
&= ~NETIF_F_TSO_ECN
;
6091 /* Software GSO depends on SG. */
6092 if ((features
& NETIF_F_GSO
) && !(features
& NETIF_F_SG
)) {
6093 netdev_dbg(dev
, "Dropping NETIF_F_GSO since no SG feature.\n");
6094 features
&= ~NETIF_F_GSO
;
6097 /* UFO needs SG and checksumming */
6098 if (features
& NETIF_F_UFO
) {
6099 /* maybe split UFO into V4 and V6? */
6100 if (!((features
& NETIF_F_GEN_CSUM
) ||
6101 (features
& (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))
6102 == (NETIF_F_IP_CSUM
|NETIF_F_IPV6_CSUM
))) {
6104 "Dropping NETIF_F_UFO since no checksum offload features.\n");
6105 features
&= ~NETIF_F_UFO
;
6108 if (!(features
& NETIF_F_SG
)) {
6110 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6111 features
&= ~NETIF_F_UFO
;
6115 #ifdef CONFIG_NET_RX_BUSY_POLL
6116 if (dev
->netdev_ops
->ndo_busy_poll
)
6117 features
|= NETIF_F_BUSY_POLL
;
6120 features
&= ~NETIF_F_BUSY_POLL
;
6125 int __netdev_update_features(struct net_device
*dev
)
6127 netdev_features_t features
;
6132 features
= netdev_get_wanted_features(dev
);
6134 if (dev
->netdev_ops
->ndo_fix_features
)
6135 features
= dev
->netdev_ops
->ndo_fix_features(dev
, features
);
6137 /* driver might be less strict about feature dependencies */
6138 features
= netdev_fix_features(dev
, features
);
6140 if (dev
->features
== features
)
6143 netdev_dbg(dev
, "Features changed: %pNF -> %pNF\n",
6144 &dev
->features
, &features
);
6146 if (dev
->netdev_ops
->ndo_set_features
)
6147 err
= dev
->netdev_ops
->ndo_set_features(dev
, features
);
6149 if (unlikely(err
< 0)) {
6151 "set_features() failed (%d); wanted %pNF, left %pNF\n",
6152 err
, &features
, &dev
->features
);
6157 dev
->features
= features
;
6163 * netdev_update_features - recalculate device features
6164 * @dev: the device to check
6166 * Recalculate dev->features set and send notifications if it
6167 * has changed. Should be called after driver or hardware dependent
6168 * conditions might have changed that influence the features.
6170 void netdev_update_features(struct net_device
*dev
)
6172 if (__netdev_update_features(dev
))
6173 netdev_features_change(dev
);
6175 EXPORT_SYMBOL(netdev_update_features
);
6178 * netdev_change_features - recalculate device features
6179 * @dev: the device to check
6181 * Recalculate dev->features set and send notifications even
6182 * if they have not changed. Should be called instead of
6183 * netdev_update_features() if also dev->vlan_features might
6184 * have changed to allow the changes to be propagated to stacked
6187 void netdev_change_features(struct net_device
*dev
)
6189 __netdev_update_features(dev
);
6190 netdev_features_change(dev
);
6192 EXPORT_SYMBOL(netdev_change_features
);
6195 * netif_stacked_transfer_operstate - transfer operstate
6196 * @rootdev: the root or lower level device to transfer state from
6197 * @dev: the device to transfer operstate to
6199 * Transfer operational state from root to device. This is normally
6200 * called when a stacking relationship exists between the root
6201 * device and the device(a leaf device).
6203 void netif_stacked_transfer_operstate(const struct net_device
*rootdev
,
6204 struct net_device
*dev
)
6206 if (rootdev
->operstate
== IF_OPER_DORMANT
)
6207 netif_dormant_on(dev
);
6209 netif_dormant_off(dev
);
6211 if (netif_carrier_ok(rootdev
)) {
6212 if (!netif_carrier_ok(dev
))
6213 netif_carrier_on(dev
);
6215 if (netif_carrier_ok(dev
))
6216 netif_carrier_off(dev
);
6219 EXPORT_SYMBOL(netif_stacked_transfer_operstate
);
6222 static int netif_alloc_rx_queues(struct net_device
*dev
)
6224 unsigned int i
, count
= dev
->num_rx_queues
;
6225 struct netdev_rx_queue
*rx
;
6226 size_t sz
= count
* sizeof(*rx
);
6230 rx
= kzalloc(sz
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_REPEAT
);
6238 for (i
= 0; i
< count
; i
++)
6244 static void netdev_init_one_queue(struct net_device
*dev
,
6245 struct netdev_queue
*queue
, void *_unused
)
6247 /* Initialize queue lock */
6248 spin_lock_init(&queue
->_xmit_lock
);
6249 netdev_set_xmit_lockdep_class(&queue
->_xmit_lock
, dev
->type
);
6250 queue
->xmit_lock_owner
= -1;
6251 netdev_queue_numa_node_write(queue
, NUMA_NO_NODE
);
6254 dql_init(&queue
->dql
, HZ
);
6258 static void netif_free_tx_queues(struct net_device
*dev
)
6263 static int netif_alloc_netdev_queues(struct net_device
*dev
)
6265 unsigned int count
= dev
->num_tx_queues
;
6266 struct netdev_queue
*tx
;
6267 size_t sz
= count
* sizeof(*tx
);
6269 BUG_ON(count
< 1 || count
> 0xffff);
6271 tx
= kzalloc(sz
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_REPEAT
);
6279 netdev_for_each_tx_queue(dev
, netdev_init_one_queue
, NULL
);
6280 spin_lock_init(&dev
->tx_global_lock
);
6286 * register_netdevice - register a network device
6287 * @dev: device to register
6289 * Take a completed network device structure and add it to the kernel
6290 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6291 * chain. 0 is returned on success. A negative errno code is returned
6292 * on a failure to set up the device, or if the name is a duplicate.
6294 * Callers must hold the rtnl semaphore. You may want
6295 * register_netdev() instead of this.
6298 * The locking appears insufficient to guarantee two parallel registers
6299 * will not get the same name.
6302 int register_netdevice(struct net_device
*dev
)
6305 struct net
*net
= dev_net(dev
);
6307 BUG_ON(dev_boot_phase
);
6312 /* When net_device's are persistent, this will be fatal. */
6313 BUG_ON(dev
->reg_state
!= NETREG_UNINITIALIZED
);
6316 spin_lock_init(&dev
->addr_list_lock
);
6317 netdev_set_addr_lockdep_class(dev
);
6321 ret
= dev_get_valid_name(net
, dev
, dev
->name
);
6325 /* Init, if this function is available */
6326 if (dev
->netdev_ops
->ndo_init
) {
6327 ret
= dev
->netdev_ops
->ndo_init(dev
);
6335 if (((dev
->hw_features
| dev
->features
) &
6336 NETIF_F_HW_VLAN_CTAG_FILTER
) &&
6337 (!dev
->netdev_ops
->ndo_vlan_rx_add_vid
||
6338 !dev
->netdev_ops
->ndo_vlan_rx_kill_vid
)) {
6339 netdev_WARN(dev
, "Buggy VLAN acceleration in driver!\n");
6346 dev
->ifindex
= dev_new_index(net
);
6347 else if (__dev_get_by_index(net
, dev
->ifindex
))
6350 if (dev
->iflink
== -1)
6351 dev
->iflink
= dev
->ifindex
;
6353 /* Transfer changeable features to wanted_features and enable
6354 * software offloads (GSO and GRO).
6356 dev
->hw_features
|= NETIF_F_SOFT_FEATURES
;
6357 dev
->features
|= NETIF_F_SOFT_FEATURES
;
6358 dev
->wanted_features
= dev
->features
& dev
->hw_features
;
6360 if (!(dev
->flags
& IFF_LOOPBACK
)) {
6361 dev
->hw_features
|= NETIF_F_NOCACHE_COPY
;
6364 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6366 dev
->vlan_features
|= NETIF_F_HIGHDMA
;
6368 /* Make NETIF_F_SG inheritable to tunnel devices.
6370 dev
->hw_enc_features
|= NETIF_F_SG
;
6372 /* Make NETIF_F_SG inheritable to MPLS.
6374 dev
->mpls_features
|= NETIF_F_SG
;
6376 ret
= call_netdevice_notifiers(NETDEV_POST_INIT
, dev
);
6377 ret
= notifier_to_errno(ret
);
6381 ret
= netdev_register_kobject(dev
);
6384 dev
->reg_state
= NETREG_REGISTERED
;
6386 __netdev_update_features(dev
);
6389 * Default initial state at registry is that the
6390 * device is present.
6393 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
6395 linkwatch_init_dev(dev
);
6397 dev_init_scheduler(dev
);
6399 list_netdevice(dev
);
6400 add_device_randomness(dev
->dev_addr
, dev
->addr_len
);
6402 /* If the device has permanent device address, driver should
6403 * set dev_addr and also addr_assign_type should be set to
6404 * NET_ADDR_PERM (default value).
6406 if (dev
->addr_assign_type
== NET_ADDR_PERM
)
6407 memcpy(dev
->perm_addr
, dev
->dev_addr
, dev
->addr_len
);
6409 /* Notify protocols, that a new device appeared. */
6410 ret
= call_netdevice_notifiers(NETDEV_REGISTER
, dev
);
6411 ret
= notifier_to_errno(ret
);
6413 rollback_registered(dev
);
6414 dev
->reg_state
= NETREG_UNREGISTERED
;
6417 * Prevent userspace races by waiting until the network
6418 * device is fully setup before sending notifications.
6420 if (!dev
->rtnl_link_ops
||
6421 dev
->rtnl_link_state
== RTNL_LINK_INITIALIZED
)
6422 rtmsg_ifinfo(RTM_NEWLINK
, dev
, ~0U, GFP_KERNEL
);
6428 if (dev
->netdev_ops
->ndo_uninit
)
6429 dev
->netdev_ops
->ndo_uninit(dev
);
6432 EXPORT_SYMBOL(register_netdevice
);
6435 * init_dummy_netdev - init a dummy network device for NAPI
6436 * @dev: device to init
6438 * This takes a network device structure and initialize the minimum
6439 * amount of fields so it can be used to schedule NAPI polls without
6440 * registering a full blown interface. This is to be used by drivers
6441 * that need to tie several hardware interfaces to a single NAPI
6442 * poll scheduler due to HW limitations.
6444 int init_dummy_netdev(struct net_device
*dev
)
6446 /* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
6448 * NAPI code and this dummy netdev is supposed to be
6449 * only ever used for NAPI polls
6451 memset(dev
, 0, sizeof(struct net_device
));
6453 /* make sure we BUG if trying to hit standard
6454 * register/unregister code path
6456 dev
->reg_state
= NETREG_DUMMY
;
6458 /* NAPI wants this */
6459 INIT_LIST_HEAD(&dev
->napi_list
);
6461 /* a dummy interface is started by default */
6462 set_bit(__LINK_STATE_PRESENT
, &dev
->state
);
6463 set_bit(__LINK_STATE_START
, &dev
->state
);
6465 /* Note : We dont allocate pcpu_refcnt for dummy devices,
6466 * because users of this 'device' dont need to change
6472 EXPORT_SYMBOL_GPL(init_dummy_netdev
);
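
/*
 * Editorial example, not part of the original source: a driver whose hardware
 * multiplexes several ports behind one interrupt can hang its NAPI context
 * off a dummy netdev, roughly as sketched below. The adapter structure and
 * the poll callback are hypothetical assumptions.
 */
struct example_adapter {
	struct net_device napi_dev;	/* never registered, NAPI anchor only */
	struct napi_struct napi;
};

static void example_adapter_init(struct example_adapter *ad,
				 int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&ad->napi_dev);
	netif_napi_add(&ad->napi_dev, &ad->napi, poll, NAPI_POLL_WEIGHT);
	napi_enable(&ad->napi);
}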
6476 * register_netdev - register a network device
6477 * @dev: device to register
6479 * Take a completed network device structure and add it to the kernel
6480 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6481 * chain. 0 is returned on success. A negative errno code is returned
6482 * on a failure to set up the device, or if the name is a duplicate.
6484 * This is a wrapper around register_netdevice that takes the rtnl semaphore
6485 * and expands the device name if you passed a format string to
6488 int register_netdev(struct net_device
*dev
)
6493 err
= register_netdevice(dev
);
6497 EXPORT_SYMBOL(register_netdev
);
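
/*
 * Editorial example, not part of the original source: the usual driver-side
 * lifecycle around the registration helpers above. The private structure and
 * the probe/remove names are assumptions for illustration.
 */
struct example_priv {
	int placeholder;
};

static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct example_priv));
	if (!dev)
		return -ENOMEM;

	/* fill in dev->netdev_ops, MAC address, features, ... */

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}
	return 0;
}

static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}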
6499 int netdev_refcnt_read(const struct net_device
*dev
)
6503 for_each_possible_cpu(i
)
6504 refcnt
+= *per_cpu_ptr(dev
->pcpu_refcnt
, i
);
6507 EXPORT_SYMBOL(netdev_refcnt_read
);
6510 * netdev_wait_allrefs - wait until all references are gone.
6511 * @dev: target net_device
6513 * This is called when unregistering network devices.
6515 * Any protocol or device that holds a reference should register
6516 * for netdevice notification, and cleanup and put back the
6517 * reference if they receive an UNREGISTER event.
6518 * We can get stuck here if buggy protocols don't correctly
6521 static void netdev_wait_allrefs(struct net_device
*dev
)
6523 unsigned long rebroadcast_time
, warning_time
;
6526 linkwatch_forget_dev(dev
);
6528 rebroadcast_time
= warning_time
= jiffies
;
6529 refcnt
= netdev_refcnt_read(dev
);
6531 while (refcnt
!= 0) {
6532 if (time_after(jiffies
, rebroadcast_time
+ 1 * HZ
)) {
6535 /* Rebroadcast unregister notification */
6536 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
6542 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL
, dev
);
6543 if (test_bit(__LINK_STATE_LINKWATCH_PENDING
,
6545 /* We must not have linkwatch events
6546 * pending on unregister. If this
6547 * happens, we simply run the queue
6548 * unscheduled, resulting in a noop
6551 linkwatch_run_queue();
6556 rebroadcast_time
= jiffies
;
6561 refcnt
= netdev_refcnt_read(dev
);
6563 if (time_after(jiffies
, warning_time
+ 10 * HZ
)) {
6564 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6566 warning_time
= jiffies
;
6575 * register_netdevice(x1);
6576 * register_netdevice(x2);
6578 * unregister_netdevice(y1);
6579 * unregister_netdevice(y2);
6585 * We are invoked by rtnl_unlock().
6586 * This allows us to deal with problems:
6587 * 1) We can delete sysfs objects which invoke hotplug
6588 * without deadlocking with linkwatch via keventd.
6589 * 2) Since we run with the RTNL semaphore not held, we can sleep
6590 * safely in order to wait for the netdev refcnt to drop to zero.
6592 * We must not return until all unregister events added during
6593 * the interval the lock was held have been completed.
6595 void netdev_run_todo(void)
6597 struct list_head list
;
6599 /* Snapshot list, allow later requests */
6600 list_replace_init(&net_todo_list
, &list
);
6605 /* Wait for rcu callbacks to finish before next phase */
6606 if (!list_empty(&list
))
6609 while (!list_empty(&list
)) {
6610 struct net_device
*dev
6611 = list_first_entry(&list
, struct net_device
, todo_list
);
6612 list_del(&dev
->todo_list
);
6615 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL
, dev
);
6618 if (unlikely(dev
->reg_state
!= NETREG_UNREGISTERING
)) {
6619 pr_err("network todo '%s' but state %d\n",
6620 dev
->name
, dev
->reg_state
);
6625 dev
->reg_state
= NETREG_UNREGISTERED
;
6627 on_each_cpu(flush_backlog
, dev
, 1);
6629 netdev_wait_allrefs(dev
);
6632 BUG_ON(netdev_refcnt_read(dev
));
6633 BUG_ON(!list_empty(&dev
->ptype_all
));
6634 BUG_ON(!list_empty(&dev
->ptype_specific
));
6635 WARN_ON(rcu_access_pointer(dev
->ip_ptr
));
6636 WARN_ON(rcu_access_pointer(dev
->ip6_ptr
));
6637 WARN_ON(dev
->dn_ptr
);
6639 if (dev
->destructor
)
6640 dev
->destructor(dev
);
6642 /* Report a network device has been unregistered */
6644 dev_net(dev
)->dev_unreg_count
--;
6646 wake_up(&netdev_unregistering_wq
);
6648 /* Free network device */
6649 kobject_put(&dev
->dev
.kobj
);
/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);
6676 * dev_get_stats - get network device statistics
6677 * @dev: device to get statistics from
6678 * @storage: place to store stats
6680 * Get network statistics from device. Return @storage.
6681 * The device driver may provide its own method by setting
6682 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6683 * otherwise the internal statistics structure is used.
6685 struct rtnl_link_stats64
*dev_get_stats(struct net_device
*dev
,
6686 struct rtnl_link_stats64
*storage
)
6688 const struct net_device_ops
*ops
= dev
->netdev_ops
;
6690 if (ops
->ndo_get_stats64
) {
6691 memset(storage
, 0, sizeof(*storage
));
6692 ops
->ndo_get_stats64(dev
, storage
);
6693 } else if (ops
->ndo_get_stats
) {
6694 netdev_stats_to_stats64(storage
, ops
->ndo_get_stats(dev
));
6696 netdev_stats_to_stats64(storage
, &dev
->stats
);
6698 storage
->rx_dropped
+= atomic_long_read(&dev
->rx_dropped
);
6699 storage
->tx_dropped
+= atomic_long_read(&dev
->tx_dropped
);
6702 EXPORT_SYMBOL(dev_get_stats
);
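
/*
 * Editorial example, not part of the original source: a driver that only
 * keeps the classic counters can simply update dev->stats and let
 * dev_get_stats() fall through to the netdev_stats_to_stats64() branch
 * above. The rx accounting helper below is a hypothetical sketch.
 */
static void example_count_rx(struct net_device *dev, unsigned int len, bool bad)
{
	if (bad) {
		dev->stats.rx_errors++;
		return;
	}
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
}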
6704 struct netdev_queue
*dev_ingress_queue_create(struct net_device
*dev
)
6706 struct netdev_queue
*queue
= dev_ingress_queue(dev
);
6708 #ifdef CONFIG_NET_CLS_ACT
6711 queue
= kzalloc(sizeof(*queue
), GFP_KERNEL
);
6714 netdev_init_one_queue(dev
, queue
, NULL
);
6715 RCU_INIT_POINTER(queue
->qdisc
, &noop_qdisc
);
6716 queue
->qdisc_sleeping
= &noop_qdisc
;
6717 rcu_assign_pointer(dev
->ingress_queue
, queue
);
6722 static const struct ethtool_ops default_ethtool_ops
;
6724 void netdev_set_default_ethtool_ops(struct net_device
*dev
,
6725 const struct ethtool_ops
*ops
)
6727 if (dev
->ethtool_ops
== &default_ethtool_ops
)
6728 dev
->ethtool_ops
= ops
;
6730 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops
);
6732 void netdev_freemem(struct net_device
*dev
)
6734 char *addr
= (char *)dev
- dev
->padded
;
6740 * alloc_netdev_mqs - allocate network device
6741 * @sizeof_priv: size of private data to allocate space for
6742 * @name: device name format string
6743 * @name_assign_type: origin of device name
6744 * @setup: callback to initialize device
6745 * @txqs: the number of TX subqueues to allocate
6746 * @rxqs: the number of RX subqueues to allocate
6748 * Allocates a struct net_device with private data area for driver use
6749 * and performs basic initialization. Also allocates subqueue structs
6750 * for each queue on the device.
6752 struct net_device
*alloc_netdev_mqs(int sizeof_priv
, const char *name
,
6753 unsigned char name_assign_type
,
6754 void (*setup
)(struct net_device
*),
6755 unsigned int txqs
, unsigned int rxqs
)
6757 struct net_device
*dev
;
6759 struct net_device
*p
;
6761 BUG_ON(strlen(name
) >= sizeof(dev
->name
));
6764 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6770 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6775 alloc_size
= sizeof(struct net_device
);
6777 /* ensure 32-byte alignment of private area */
6778 alloc_size
= ALIGN(alloc_size
, NETDEV_ALIGN
);
6779 alloc_size
+= sizeof_priv
;
6781 /* ensure 32-byte alignment of whole construct */
6782 alloc_size
+= NETDEV_ALIGN
- 1;
6784 p
= kzalloc(alloc_size
, GFP_KERNEL
| __GFP_NOWARN
| __GFP_REPEAT
);
6786 p
= vzalloc(alloc_size
);
6790 dev
= PTR_ALIGN(p
, NETDEV_ALIGN
);
6791 dev
->padded
= (char *)dev
- (char *)p
;
6793 dev
->pcpu_refcnt
= alloc_percpu(int);
6794 if (!dev
->pcpu_refcnt
)
6797 if (dev_addr_init(dev
))
6803 dev_net_set(dev
, &init_net
);
6805 dev
->gso_max_size
= GSO_MAX_SIZE
;
6806 dev
->gso_max_segs
= GSO_MAX_SEGS
;
6807 dev
->gso_min_segs
= 0;
6809 INIT_LIST_HEAD(&dev
->napi_list
);
6810 INIT_LIST_HEAD(&dev
->unreg_list
);
6811 INIT_LIST_HEAD(&dev
->close_list
);
6812 INIT_LIST_HEAD(&dev
->link_watch_list
);
6813 INIT_LIST_HEAD(&dev
->adj_list
.upper
);
6814 INIT_LIST_HEAD(&dev
->adj_list
.lower
);
6815 INIT_LIST_HEAD(&dev
->all_adj_list
.upper
);
6816 INIT_LIST_HEAD(&dev
->all_adj_list
.lower
);
6817 INIT_LIST_HEAD(&dev
->ptype_all
);
6818 INIT_LIST_HEAD(&dev
->ptype_specific
);
6819 dev
->priv_flags
= IFF_XMIT_DST_RELEASE
| IFF_XMIT_DST_RELEASE_PERM
;
6822 dev
->num_tx_queues
= txqs
;
6823 dev
->real_num_tx_queues
= txqs
;
6824 if (netif_alloc_netdev_queues(dev
))
6828 dev
->num_rx_queues
= rxqs
;
6829 dev
->real_num_rx_queues
= rxqs
;
6830 if (netif_alloc_rx_queues(dev
))
6834 strcpy(dev
->name
, name
);
6835 dev
->name_assign_type
= name_assign_type
;
6836 dev
->group
= INIT_NETDEV_GROUP
;
6837 if (!dev
->ethtool_ops
)
6838 dev
->ethtool_ops
= &default_ethtool_ops
;
6846 free_percpu(dev
->pcpu_refcnt
);
6848 netdev_freemem(dev
);
6851 EXPORT_SYMBOL(alloc_netdev_mqs
);
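
/*
 * Editorial example, not part of the original source: allocating a multiqueue
 * Ethernet-style device directly with the function above rather than through
 * the alloc_etherdev() wrappers. The name template and queue counts are
 * illustrative assumptions.
 */
static struct net_device *example_alloc(void)
{
	return alloc_netdev_mqs(0, "example%d", NET_NAME_UNKNOWN,
				ether_setup, 8, 8);
}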
6854 * free_netdev - free network device
6857 * This function does the last stage of destroying an allocated device
6858 * interface. The reference to the device object is released.
6859 * If this is the last reference then it will be freed.
6861 void free_netdev(struct net_device
*dev
)
6863 struct napi_struct
*p
, *n
;
6865 netif_free_tx_queues(dev
);
6870 kfree(rcu_dereference_protected(dev
->ingress_queue
, 1));
6872 /* Flush device addresses */
6873 dev_addr_flush(dev
);
6875 list_for_each_entry_safe(p
, n
, &dev
->napi_list
, dev_list
)
6878 free_percpu(dev
->pcpu_refcnt
);
6879 dev
->pcpu_refcnt
= NULL
;
6881 /* Compatibility with error handling in drivers */
6882 if (dev
->reg_state
== NETREG_UNINITIALIZED
) {
6883 netdev_freemem(dev
);
6887 BUG_ON(dev
->reg_state
!= NETREG_UNREGISTERED
);
6888 dev
->reg_state
= NETREG_RELEASED
;
6890 /* will free via device release */
6891 put_device(&dev
->dev
);
6893 EXPORT_SYMBOL(free_netdev
);
6896 * synchronize_net - Synchronize with packet receive processing
6898 * Wait for packets currently being received to be done.
6899 * Does not block later packets from starting.
6901 void synchronize_net(void)
6904 if (rtnl_is_locked())
6905 synchronize_rcu_expedited();
6909 EXPORT_SYMBOL(synchronize_net
);
6912 * unregister_netdevice_queue - remove device from the kernel
6916 * This function shuts down a device interface and removes it
6917 * from the kernel tables.
6918 * If head not NULL, device is queued to be unregistered later.
6920 * Callers must hold the rtnl semaphore. You may want
6921 * unregister_netdev() instead of this.
6924 void unregister_netdevice_queue(struct net_device
*dev
, struct list_head
*head
)
6929 list_move_tail(&dev
->unreg_list
, head
);
6931 rollback_registered(dev
);
6932 /* Finish processing unregister after unlock */
6936 EXPORT_SYMBOL(unregister_netdevice_queue
);
6939 * unregister_netdevice_many - unregister many devices
6940 * @head: list of devices
6942 * Note: As most callers use a stack allocated list_head,
6943 * we force a list_del() to make sure stack wont be corrupted later.
6945 void unregister_netdevice_many(struct list_head
*head
)
6947 struct net_device
*dev
;
6949 if (!list_empty(head
)) {
6950 rollback_registered_many(head
);
6951 list_for_each_entry(dev
, head
, unreg_list
)
6956 EXPORT_SYMBOL(unregister_netdevice_many
);
6959 * unregister_netdev - remove device from the kernel
6962 * This function shuts down a device interface and removes it
6963 * from the kernel tables.
6965 * This is just a wrapper for unregister_netdevice that takes
6966 * the rtnl semaphore. In general you want to use this and not
6967 * unregister_netdevice.
6969 void unregister_netdev(struct net_device
*dev
)
6972 unregister_netdevice(dev
);
6975 EXPORT_SYMBOL(unregister_netdev
);
 * dev_change_net_namespace - move device to a different network namespace
6980 * @net: network namespace
6981 * @pat: If not NULL name pattern to try if the current device name
6982 * is already taken in the destination network namespace.
6984 * This function shuts down a device interface and moves it
6985 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
6988 * Callers must hold the rtnl semaphore.
6991 int dev_change_net_namespace(struct net_device
*dev
, struct net
*net
, const char *pat
)
6997 /* Don't allow namespace local devices to be moved. */
6999 if (dev
->features
& NETIF_F_NETNS_LOCAL
)
	/* Ensure the device has been registered */
7003 if (dev
->reg_state
!= NETREG_REGISTERED
)
	/* Get out if there is nothing to do */
7008 if (net_eq(dev_net(dev
), net
))
7011 /* Pick the destination device name, and ensure
7012 * we can use it in the destination network namespace.
7015 if (__dev_get_by_name(net
, dev
->name
)) {
7016 /* We get here if we can't use the current device name */
7019 if (dev_get_valid_name(net
, dev
, pat
) < 0)
7024 * And now a mini version of register_netdevice unregister_netdevice.
7027 /* If device is running close it first. */
7030 /* And unlink it from device chain */
7032 unlist_netdevice(dev
);
7036 /* Shutdown queueing discipline. */
7039 /* Notify protocols, that we are about to destroy
7040 this device. They should clean all the things.
7042 Note that dev->reg_state stays at NETREG_REGISTERED.
7043 This is wanted because this way 8021q and macvlan know
7044 the device is just moving and can keep their slaves up.
7046 call_netdevice_notifiers(NETDEV_UNREGISTER
, dev
);
7048 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL
, dev
);
7049 rtmsg_ifinfo(RTM_DELLINK
, dev
, ~0U, GFP_KERNEL
);
7052 * Flush the unicast and multicast chains
7057 /* Send a netdev-removed uevent to the old namespace */
7058 kobject_uevent(&dev
->dev
.kobj
, KOBJ_REMOVE
);
7059 netdev_adjacent_del_links(dev
);
7061 /* Actually switch the network namespace */
7062 dev_net_set(dev
, net
);
7064 /* If there is an ifindex conflict assign a new one */
7065 if (__dev_get_by_index(net
, dev
->ifindex
)) {
7066 int iflink
= (dev
->iflink
== dev
->ifindex
);
7067 dev
->ifindex
= dev_new_index(net
);
7069 dev
->iflink
= dev
->ifindex
;
7072 /* Send a netdev-add uevent to the new namespace */
7073 kobject_uevent(&dev
->dev
.kobj
, KOBJ_ADD
);
7074 netdev_adjacent_add_links(dev
);
7076 /* Fixup kobjects */
7077 err
= device_rename(&dev
->dev
, dev
->name
);
7080 /* Add the device back in the hashes */
7081 list_netdevice(dev
);
7083 /* Notify protocols, that a new device appeared. */
7084 call_netdevice_notifiers(NETDEV_REGISTER
, dev
);
7087 * Prevent userspace races by waiting until the network
7088 * device is fully setup before sending notifications.
7090 rtmsg_ifinfo(RTM_NEWLINK
, dev
, ~0U, GFP_KERNEL
);
7097 EXPORT_SYMBOL_GPL(dev_change_net_namespace
);
static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception :
	 * process_backlog() must be called by cpu owning percpu backlog.
	 * We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
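
/*
 * Illustrative sketch only (not part of dev.c): an aggregating driver such
 * as bonding or team typically folds its slaves' feature sets together with
 * netdev_increment_features(), starting from the mask and masking with the
 * master's vlan_features.  "example_slave" and "example_compute_features"
 * are hypothetical; real drivers also refresh hw_features and call
 * netdev_change_features() afterwards.
 */
#if 0
struct example_slave {
	struct net_device *dev;
	struct list_head list;
};

static void example_compute_features(struct net_device *master,
				     struct list_head *slaves)
{
	netdev_features_t features = master->vlan_features;
	struct example_slave *s;

	list_for_each_entry(s, slaves, list)
		features = netdev_increment_features(features,
						     s->dev->features,
						     master->vlan_features);

	master->features = features;
}
#endif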
static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}
/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}
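
/*
 * Illustrative sketch only (not part of dev.c): diagnostics such as the
 * netdev watchdog use netdev_drivername() to name the offending driver in a
 * warning.  "example_report_stall" and its message text are hypothetical.
 */
#if 0
static void example_report_stall(struct net_device *dev)
{
	pr_warn("%s (%s): transmit appears stuck\n",
		dev->name, netdev_drivername(dev));
}
#endif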
static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}
void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);
#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
	__netdev_printk(level, dev, &vaf);			\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
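
/*
 * Illustrative sketch only (not part of dev.c): the helpers generated above
 * are the preferred logging calls for drivers, since they prefix messages
 * with the driver, bus and interface names.  "example_open" is a
 * hypothetical ndo_open implementation.
 */
#if 0
static int example_open(struct net_device *dev)
{
	netdev_info(dev, "link is being brought up\n");
	if (!netif_carrier_ok(dev))
		netdev_warn(dev, "no carrier detected yet\n");
	return 0;
}
#endif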
static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}
static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
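
/*
 * Illustrative sketch only (not part of dev.c): other subsystems follow the
 * same pattern, pairing an .init/.exit with register_pernet_subsys() so
 * their per-namespace state tracks namespace creation and teardown.  The
 * "example_net_ops" names are hypothetical.
 */
#if 0
static int __net_init example_net_init(struct net *net)
{
	/* allocate and hang per-namespace state off 'net' here */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* free the per-namespace state allocated in example_net_init() */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};
/* registered from module init with register_pernet_subsys(&example_net_ops) */
#endif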
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}
static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed
	 * wait here for all pending unregistrations to complete,
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}
static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};
 *	Initialize the DEV module. At boot time this walks the device list and
 *	unhooks any devices that fail to initialise (normally hardware not
 *	present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace the loopback device must
	 * be present. Since we now dynamically allocate and free the
	 * loopback device ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the
	 * list of network devices.  Ensuring the loopback device
	 * is the first device that appears and the last network device
	 * that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);

	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init
);