/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/mpls.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>
#include <linux/errqueue.h>
#include <linux/hrtimer.h>
#include <linux/netfilter_ingress.h>
#include <linux/sctp.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
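
/* Illustrative sketch (not part of this file): the pure-reader pattern the
 * comment above describes.  A hypothetical reader walks the per-namespace
 * device list under RCU instead of taking dev_base_lock:
 *
 *	rcu_read_lock();
 *	for_each_netdev_rcu(net, dev)
 *		inspect_device(dev);	// made-up read-only helper
 *	rcu_read_unlock();
 *
 * Writers, by contrast, hold the RTNL semaphore and take dev_base_lock for
 * writing only around the actual list update, as list_netdevice() below does.
 */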

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect an RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return pt->dev ? &pt->dev->ptype_all : &ptype_all;
	else
		return pt->dev ? &pt->dev->ptype_specific :
				 &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);
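
/* Illustrative sketch (not part of this file): a minimal module-side user of
 * dev_add_pack()/dev_remove_pack().  The handler and variable names are made
 * up for the example; a real handler must consume or free the skb it is
 * given, and registration/removal typically happen in module init/exit.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt,
 *			      struct net_device *orig_dev)
 *	{
 *		// inspect skb here, then release our reference to it
 *		kfree_skb(skb);
 *		return 0;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = htons(ETH_P_ALL),	// tap: see every protocol
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);		// does not sleep
 *	dev_remove_pack(&my_tap);	// sleeps (synchronize_net)
 */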

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that all
 *	CPUs that are in the middle of receiving packets will see the new
 *	offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
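
/* Illustrative example (inferred from the parsing above, not taken from this
 * file): a kernel command line entry of the form
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * would be stored as irq=5, base_addr=0x300, mem_start=0, mem_end=0 for the
 * device named "eth0", since get_options() consumes the leading integers and
 * leaves the device name in @str.
 */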

/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	dev_get_iflink	- get 'iflink' value of an interface
 *	@dev: targeted interface
 *
 *	Indicates the ifindex the interface is linked to.
 *	Physical interfaces have the same 'ifindex' and 'iflink' values.
 */

int dev_get_iflink(const struct net_device *dev)
{
	if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
		return dev->netdev_ops->ndo_get_iflink(dev);

	return dev->ifindex;
}
EXPORT_SYMBOL(dev_get_iflink);

/**
 *	dev_fill_metadata_dst - Retrieve tunnel egress information.
 *	@dev: targeted interface
 *	@skb: The packet.
 *
 *	For better visibility of tunnel traffic OVS needs to retrieve
 *	egress tunnel information for a packet. The following API allows
 *	a user to get this info.
 */
int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info;

	if (!dev->netdev_ops || !dev->netdev_ops->ndo_fill_metadata_dst)
		return -EINVAL;

	info = skb_tunnel_info_unclone(skb);
	if (!info)
		return -ENOMEM;
	if (unlikely(!(info->mode & IP_TUNNEL_INFO_TX)))
		return -EINVAL;

	return dev->netdev_ops->ndo_fill_metadata_dst(dev, skb);
}
EXPORT_SYMBOL_GPL(dev_fill_metadata_dst);

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
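
/* Illustrative sketch (not part of this file): typical refcounted use of
 * dev_get_by_name().  The interface name is made up for the example.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		// use dev; the held reference keeps it from being freed
 *		dev_put(dev);
 *	}
 *
 * Callers already running under rcu_read_lock() that only need the pointer
 * inside the critical section can use dev_get_by_name_rcu() and skip the
 * hold/put pair.
 */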

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
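
/* Illustrative sketch (not part of this file): looking up an Ethernet device
 * by MAC address under RCU.  The address bytes are made up for the example.
 *
 *	static const char mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	struct net_device *dev;
 *
 *	rcu_read_lock();
 *	dev = dev_getbyhwaddr_rcu(&init_net, ARPHRD_ETHER, mac);
 *	if (dev)
 *		dev_hold(dev);	// take a reference before leaving the section
 *	rcu_read_unlock();
 */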

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || *name == ':' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
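
/* Illustrative examples (not part of this file) of what the checks above
 * accept and reject:
 *
 *	dev_valid_name("eth0")		-> true
 *	dev_valid_name("my device")	-> false (whitespace)
 *	dev_valid_name("a/b")		-> false ('/')
 *	dev_valid_name("..")		-> false (reserved)
 *
 * An empty string and any name of IFNAMSIZ characters or more are also
 * rejected, since the name must fit the fixed-size dev->name buffer and be
 * usable as a sysfs directory name.
 */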

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
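
/* Illustrative sketch (not part of this file): how a driver typically uses
 * the format-string interface, e.g. from its setup path under rtnl_lock().
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto fail;	// no free slot, or invalid format
 *	// dev->name now holds the first free name, e.g. "eth0"
 *
 * Only a single "%d" is accepted: a format with any other '%' conversion, or
 * with two '%' characters, is rejected with -EINVAL by __dev_alloc_name().
 */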

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set the ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

int dev_close_many(struct list_head *head, bool unlink)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		if (unlink)
			list_del_init(&dev->close_list);
	}

	return 0;
}
EXPORT_SYMBOL(dev_close_many);

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single, true);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);
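
/* Illustrative sketch (not part of this file): dev_open() and dev_close()
 * must run under the RTNL semaphore, e.g. from a hypothetical control path:
 *
 *	rtnl_lock();
 *	err = dev_open(dev);	// nop (returns 0) if the device is already up
 *	...
 *	dev_close(dev);		// always returns 0
 *	rtnl_unlock();
 */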

/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		dev_disable_lro(lower_dev);
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);
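
/* Illustrative sketch (not part of this file): a minimal netdevice notifier.
 * The callback and block names are made up for the example.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);	// replays REGISTER/UP
 *	unregister_netdevice_notifier(&my_netdev_nb);	// synthesizes DOWN/UNREGISTER
 */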

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;

void net_inc_ingress_queue(void)
{
	static_key_slow_inc(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_ingress_queue);

void net_dec_ingress_queue(void)
{
	static_key_slow_dec(&ingress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
#endif

#ifdef CONFIG_NET_EGRESS
static struct static_key egress_needed __read_mostly;

void net_inc_egress_queue(void)
{
	static_key_slow_inc(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_inc_egress_queue);

void net_dec_egress_queue(void)
{
	static_key_slow_dec(&egress_needed);
}
EXPORT_SYMBOL_GPL(net_dec_egress_queue);
#endif

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);
1da177e4 1730
3b098e2d 1731static inline void net_timestamp_set(struct sk_buff *skb)
1da177e4 1732{
588f0330 1733 skb->tstamp.tv64 = 0;
c5905afb 1734 if (static_key_false(&netstamp_needed))
a61bbcf2 1735 __net_timestamp(skb);
1da177e4
LT
1736}
1737
588f0330 1738#define net_timestamp_check(COND, SKB) \
c5905afb 1739 if (static_key_false(&netstamp_needed)) { \
588f0330
ED
1740 if ((COND) && !(SKB)->tstamp.tv64) \
1741 __net_timestamp(SKB); \
1742 } \
3b098e2d 1743
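/* Illustrative sketch (not from this file): protocols that need RX software
 * timestamps bracket their interest with the enable/disable pair, which
 * flips the netstamp_needed static key consulted by net_timestamp_set()
 * above.
 *
 *	net_enable_timestamp();
 *	...	receive path now stamps skbs via __net_timestamp()
 *	net_disable_timestamp();
 */
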
bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_orphan_frags(skb, GFP_ATOMIC) ||
	    unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->priority = 0;
	skb->protocol = eth_type_trans(skb, dev);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);

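/* Illustrative sketch (not from this file): a paired virtual device, in the
 * style of veth, can hand frames straight to its peer from ndo_start_xmit.
 * The peer lookup helper below is hypothetical.
 *
 *	static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 */
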
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline void deliver_ptype_list_skb(struct sk_buff *skb,
					  struct packet_type **pt,
					  struct net_device *orig_dev,
					  __be16 type,
					  struct list_head *ptype_list)
{
	struct packet_type *ptype, *pt_prev = *pt;

	list_for_each_entry_rcu(ptype, ptype_list, list) {
		if (ptype->type != type)
			continue;
		if (pt_prev)
			deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}
	*pt = pt_prev;
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;
	struct list_head *ptype_list = &ptype_all;

	rcu_read_lock();
again:
	list_for_each_entry_rcu(ptype, ptype_list, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if (skb_loop_sk(ptype, skb))
			continue;

		if (pt_prev) {
			deliver_skb(skb2, pt_prev, skb->dev);
			pt_prev = ptype;
			continue;
		}

		/* need to clone skb, done only once */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out_unlock;

		net_timestamp_set(skb2);

		/* skb->nh should be correctly
		 * set by sender, so that the second statement is
		 * just protection against buggy protocols.
		 */
		skb_reset_mac_header(skb2);

		if (skb_network_header(skb2) < skb2->data ||
		    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
			net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
					     ntohs(skb2->protocol),
					     dev->name);
			skb_reset_network_header(skb2);
		}

		skb2->transport_header = skb2->network_header;
		skb2->pkt_type = PACKET_OUTGOING;
		pt_prev = ptype;
	}

	if (ptype_list == &ptype_all) {
		ptype_list = &dev->ptype_all;
		goto again;
	}
out_unlock:
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, if
 * TC0 is invalid nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can
 * before calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

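/* Worked example (illustrative): a device with num_tc == 2 and
 * tc_to_txq = { {offset 0, count 4}, {offset 4, count 4} } that shrinks to
 * txq == 6 keeps TC0 intact (0 + 4 <= 6) but invalidates TC1 (4 + 4 > 6),
 * so any priority mapped to TC1 is reset to TC0 by the loop above.
 */
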
#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}

	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
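/* Illustrative sketch (not from this file): a multiqueue driver could pin
 * each TX queue to the CPUs of its local NUMA node at probe time. The loop
 * bound and the "priv" structure are hypothetical.
 *
 *	#ifdef CONFIG_XPS
 *	for (i = 0; i < priv->num_tx_queues; i++)
 *		netif_set_xps_queue(dev,
 *				    cpumask_of_node(priv->numa_node), i);
 *	#endif
 */
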
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_SYSFS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

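/* Illustrative sketch (not from this file): a driver reconfiguring its
 * channel count typically resizes both directions under RTNL. "priv" and
 * the channel count are hypothetical.
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, priv->num_channels);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, priv->num_channels);
 *	rtnl_unlock();
 */
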
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(txq->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_wake_subqueue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);


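/* Illustrative sketch (not from this file): a driver's TX-completion path,
 * which may run in hard-irq context, frees transmitted buffers with the
 * _any variant so the right deferral happens automatically. The ring
 * iterator is hypothetical.
 *
 *	while ((skb = my_ring_next_completed(ring)) != NULL)
 *		dev_kfree_skb_any(skb);
 */
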
/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to the system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

/*
 * Returns a Tx hash based on the given packet descriptor and a Tx queue
 * count to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	return (u16) reciprocal_scale(skb_get_hash(skb), qcount) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);

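/* Worked example (illustrative): with num_tx_queues == 4 and no traffic
 * classes, reciprocal_scale() maps the 32-bit flow hash h to
 * (u32)(((u64)h * 4) >> 32), i.e. uniformly onto queues 0..3 without a
 * modulo. With dev->num_tc set, the same scaling is applied within the
 * class's offset/count slice instead.
 */
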
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *name = "";

	if (!net_ratelimit())
		return;

	if (dev) {
		if (dev->dev.parent)
			name = dev_driver_string(dev->dev.parent);
		else
			name = netdev_name(dev);
	}
	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     name, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

/* __skb_csum_offload_chk - Driver helper function to determine if a device
 * with limited checksum offload capabilities is able to offload the checksum
 * for a given packet.
 *
 * Arguments:
 *   skb - sk_buff for the packet in question
 *   spec - contains the description of what device can offload
 *   csum_encapped - returns true if the checksum being offloaded is
 *	      encapsulated. That is, it is the checksum for the transport
 *	      header in the inner headers.
 *   checksum_help - when set indicates that helper function should
 *	      call skb_checksum_help if offload checks fail
 *
 * Returns:
 *   true: Packet has passed the checksum checks and should be offloadable to
 *	   the device (a driver may still need to check for additional
 *	   restrictions of its device)
 *   false: Checksum is not offloadable. If checksum_help was set then
 *	   skb_checksum_help was called to resolve checksum for non-GSO
 *	   packets and when IP protocol is not SCTP
 */
bool __skb_csum_offload_chk(struct sk_buff *skb,
			    const struct skb_csum_offl_spec *spec,
			    bool *csum_encapped,
			    bool csum_help)
{
	struct iphdr *iph;
	struct ipv6hdr *ipv6;
	void *nhdr;
	int protocol;
	u8 ip_proto;

	if (skb->protocol == htons(ETH_P_8021Q) ||
	    skb->protocol == htons(ETH_P_8021AD)) {
		if (!spec->vlan_okay)
			goto need_help;
	}

	/* We check whether the checksum refers to a transport layer checksum in
	 * the outermost header or an encapsulated transport layer checksum that
	 * corresponds to the inner headers of the skb. If the checksum is for
	 * something else in the packet we need help.
	 */
	if (skb_checksum_start_offset(skb) == skb_transport_offset(skb)) {
		/* Non-encapsulated checksum */
		protocol = eproto_to_ipproto(vlan_get_protocol(skb));
		nhdr = skb_network_header(skb);
		*csum_encapped = false;
		if (spec->no_not_encapped)
			goto need_help;
	} else if (skb->encapsulation && spec->encap_okay &&
		   skb_checksum_start_offset(skb) ==
		   skb_inner_transport_offset(skb)) {
		/* Encapsulated checksum */
		*csum_encapped = true;
		switch (skb->inner_protocol_type) {
		case ENCAP_TYPE_ETHER:
			protocol = eproto_to_ipproto(skb->inner_protocol);
			break;
		case ENCAP_TYPE_IPPROTO:
			protocol = skb->inner_protocol;
			break;
		}
		nhdr = skb_inner_network_header(skb);
	} else {
		goto need_help;
	}

	switch (protocol) {
	case IPPROTO_IP:
		if (!spec->ipv4_okay)
			goto need_help;
		iph = nhdr;
		ip_proto = iph->protocol;
		if (iph->ihl != 5 && !spec->ip_options_okay)
			goto need_help;
		break;
	case IPPROTO_IPV6:
		if (!spec->ipv6_okay)
			goto need_help;
		if (spec->no_encapped_ipv6 && *csum_encapped)
			goto need_help;
		ipv6 = nhdr;
		nhdr += sizeof(*ipv6);
		ip_proto = ipv6->nexthdr;
		break;
	default:
		goto need_help;
	}

ip_proto_again:
	switch (ip_proto) {
	case IPPROTO_TCP:
		if (!spec->tcp_okay ||
		    skb->csum_offset != offsetof(struct tcphdr, check))
			goto need_help;
		break;
	case IPPROTO_UDP:
		if (!spec->udp_okay ||
		    skb->csum_offset != offsetof(struct udphdr, check))
			goto need_help;
		break;
	case IPPROTO_SCTP:
		if (!spec->sctp_okay ||
		    skb->csum_offset != offsetof(struct sctphdr, checksum))
			goto cant_help;
		break;
	case NEXTHDR_HOP:
	case NEXTHDR_ROUTING:
	case NEXTHDR_DEST: {
		u8 *opthdr = nhdr;

		if (protocol != IPPROTO_IPV6 || !spec->ext_hdrs_okay)
			goto need_help;

		ip_proto = opthdr[0];
		nhdr += (opthdr[1] + 1) << 3;

		goto ip_proto_again;
	}
	default:
		goto need_help;
	}

	/* Passed the tests for offloading checksum */
	return true;

need_help:
	if (csum_help && !skb_shinfo(skb)->gso_size)
		skb_checksum_help(skb);
cant_help:
	return false;
}
EXPORT_SYMBOL(__skb_csum_offload_chk);

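/* Illustrative sketch (not from this file): a NIC that can only checksum
 * plain TCP/UDP over IPv4 might describe itself with a spec like the one
 * below and call the helper from its xmit path; field names follow the
 * checks above, but treat the exact initializer as an assumption.
 *
 *	static const struct skb_csum_offl_spec my_csum_spec = {
 *		.ipv4_okay = 1,
 *		.tcp_okay = 1,
 *		.udp_okay = 1,
 *	};
 *
 *	bool encapped;
 *
 *	if (!__skb_csum_offload_chk(skb, &my_csum_spec, &encapped, true))
 *		return;		checksum was resolved in software if possible
 */
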
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	return __vlan_get_protocol(skb, type, depth);
}

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);


/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}

/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 *
 *	Segmentation preserves SKB_SGO_CB_OFFSET bytes of previous skb cb.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		skb_warn_bad_offload(skb);

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	BUILD_BUG_ON(SKB_SGO_CB_OFFSET +
		     sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb));

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
EXPORT_SYMBOL(__skb_gso_segment);

/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know that:
 * 1. IOMMU is present and allows mapping all the memory.
 * 2. No high memory really exists on this machine.
 */
static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (eth_p_mpls(type))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~NETIF_F_CSUM_MASK;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t passthru_features_check(struct sk_buff *skb,
					  struct net_device *dev,
					  netdev_features_t features)
{
	return features;
}
EXPORT_SYMBOL(passthru_features_check);

static netdev_features_t dflt_features_check(const struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	return vlan_features_check(skb, features);
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;
	u16 gso_segs = skb_shinfo(skb)->gso_segs;

	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
		features &= ~NETIF_F_GSO_MASK;

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (skb_vlan_tagged(skb))
		features = netdev_intersect_features(features,
						     dev->vlan_features |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	if (dev->netdev_ops->ndo_features_check)
		features &= dev->netdev_ops->ndo_features_check(skb, dev,
								features);
	else
		features &= dflt_features_check(skb, dev, features);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);

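/* Illustrative sketch (not from this file): a driver that cannot offload
 * checksums or GSO for oversized headers can narrow the feature set per
 * packet via ndo_features_check; netif_skb_features() above will then
 * honour the result. The 128-byte limit is hypothetical.
 *
 *	static netdev_features_t my_features_check(struct sk_buff *skb,
 *						   struct net_device *dev,
 *						   netdev_features_t features)
 *	{
 *		if (skb_transport_offset(skb) > 128)
 *			features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 *		return features;
 *	}
 */
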
static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}

static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto))
		skb = __vlan_hwaccel_push_inside(skb);
	return skb;
}

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	if (skb->next)
		return skb;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			goto out_kfree_skb;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
							       skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
							 skb_checksum_start_offset(skb));
			if (!(features & NETIF_F_CSUM_MASK) &&
			    skb_checksum_help(skb))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment. If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}

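/* Worked example (illustrative): a TSO skb carrying 2896 bytes of TCP
 * payload with gso_size 1448 has gso_segs == 2 and, say, 66 bytes of
 * mac+IP+TCP headers, so skb->len == 2962. On the wire it becomes two
 * 1514-byte frames, and pkt_len is adjusted to 2962 + (2 - 1) * 66 == 3028,
 * matching the bytes actually sent.
 */
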
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC___STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx =
			sock_cgroup_prioidx(&skb->sk->sk_cgrp_data);

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);

#define RECURSION_LIMIT 10

/**
 *	dev_loopback_xmit - loop back @skb
 *	@net: network namespace this loopback is happening in
 *	@sk:  sk needed to be a netfilter okfn
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

#ifdef CONFIG_NET_EGRESS
static struct sk_buff *
sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev)
{
	struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list);
	struct tcf_result cl_res;

	if (!cl)
		return skb;

	/* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
	 * earlier by the caller.
	 */
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tc_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		qdisc_qstats_cpu_drop(cl->q);
		*ret = NET_XMIT_DROP;
		goto drop;
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		*ret = NET_XMIT_SUCCESS;
drop:
		kfree_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* No need to push/pop skb's mac_header here on egress! */
		skb_do_redirect(skb);
		*ret = NET_XMIT_SUCCESS;
		return NULL;
	default:
		break;
	}

	return skb;
}
#endif /* CONFIG_NET_EGRESS */

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[skb->sender_cpu - 1]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else
				queue_index = map->queues[reciprocal_scale(skb_get_hash(skb),
									   map->len)];
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    sk_fullsock(sk) &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

#ifdef CONFIG_XPS
	u32 sender_cpu = skb->sender_cpu - 1;

	if (sender_cpu >= (u32)NR_CPUS)
		skb->sender_cpu = raw_smp_processor_id() + 1;
#endif

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}

/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	qdisc_pkt_len_init(skb);
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
# ifdef CONFIG_NET_EGRESS
	if (static_key_false(&egress_needed)) {
		skb = sch_handle_egress(skb, &rc, dev);
		if (!skb)
			goto out;
	}
# endif
#endif
	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

#ifdef CONFIG_NET_SWITCHDEV
	/* Don't forward if offload device already forwarded */
	if (skb->offload_fwd_mark &&
	    skb->offload_fwd_mark == dev->offload_fwd_mark) {
		consume_skb(skb);
		rc = NET_XMIT_SUCCESS;
		goto out;
	}
#endif

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and take the lock. It is not prone to deadlocks.
	   Either take the noqueue qdisc path, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev);
			if (!skb)
				goto drop;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
drop:
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);

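/* Illustrative sketch (not from this file): a module that emits a fully
 * built frame sets the device and priority, then hands the skb off; the
 * skb is consumed whatever the outcome.
 *
 *	skb->dev = dev;
 *	skb->priority = TC_PRIO_CONTROL;
 *	if (dev_queue_xmit(skb) != NET_XMIT_SUCCESS)
 *		pr_debug("frame was dropped or shaped\n");
 */
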

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);
u32 rps_cpu_mask __read_mostly;
EXPORT_SYMBOL(rps_cpu_mask);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

bfb564e7
KK
3477/*
3478 * get_rps_cpu is called from netif_receive_skb and returns the target
3479 * CPU from the RPS map of the receiving queue for a given skb.
3480 * rcu_read_lock must be held on entry.
3481 */
3482static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3483 struct rps_dev_flow **rflowp)
3484{
567e4b79
ED
3485 const struct rps_sock_flow_table *sock_flow_table;
3486 struct netdev_rx_queue *rxqueue = dev->_rx;
bfb564e7 3487 struct rps_dev_flow_table *flow_table;
567e4b79 3488 struct rps_map *map;
bfb564e7 3489 int cpu = -1;
567e4b79 3490 u32 tcpu;
61b905da 3491 u32 hash;
bfb564e7
KK
3492
3493 if (skb_rx_queue_recorded(skb)) {
3494 u16 index = skb_get_rx_queue(skb);
567e4b79 3495
62fe0b40
BH
3496 if (unlikely(index >= dev->real_num_rx_queues)) {
3497 WARN_ONCE(dev->real_num_rx_queues > 1,
3498 "%s received packet on queue %u, but number "
3499 "of RX queues is %u\n",
3500 dev->name, index, dev->real_num_rx_queues);
bfb564e7
KK
3501 goto done;
3502 }
567e4b79
ED
3503 rxqueue += index;
3504 }
bfb564e7 3505
567e4b79
ED
3506 /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3507
3508 flow_table = rcu_dereference(rxqueue->rps_flow_table);
6e3f7faf 3509 map = rcu_dereference(rxqueue->rps_map);
567e4b79 3510 if (!flow_table && !map)
bfb564e7
KK
3511 goto done;
3512
2d47b459 3513 skb_reset_network_header(skb);
61b905da
TH
3514 hash = skb_get_hash(skb);
3515 if (!hash)
bfb564e7
KK
3516 goto done;
3517
fec5e652
TH
3518 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3519 if (flow_table && sock_flow_table) {
fec5e652 3520 struct rps_dev_flow *rflow;
567e4b79
ED
3521 u32 next_cpu;
3522 u32 ident;
3523
3524 /* First check into global flow table if there is a match */
3525 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3526 if ((ident ^ hash) & ~rps_cpu_mask)
3527 goto try_rps;
fec5e652 3528
567e4b79
ED
3529 next_cpu = ident & rps_cpu_mask;
3530
3531 /* OK, now we know there is a match,
3532 * we can look at the local (per receive queue) flow table
3533 */
61b905da 3534 rflow = &flow_table->flows[hash & flow_table->mask];
fec5e652
TH
3535 tcpu = rflow->cpu;
3536
fec5e652
TH
3537 /*
3538 * If the desired CPU (where last recvmsg was done) is
3539 * different from current CPU (one in the rx-queue flow
3540 * table entry), switch if one of the following holds:
a31196b0 3541 * - Current CPU is unset (>= nr_cpu_ids).
fec5e652
TH
3542 * - Current CPU is offline.
3543 * - The current CPU's queue tail has advanced beyond the
3544 * last packet that was enqueued using this table entry.
3545 * This guarantees that all previous packets for the flow
3546 * have been dequeued, thus preserving in order delivery.
3547 */
3548 if (unlikely(tcpu != next_cpu) &&
a31196b0 3549 (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
fec5e652 3550 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
baefa31d
TH
3551 rflow->last_qtail)) >= 0)) {
3552 tcpu = next_cpu;
c445477d 3553 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
baefa31d 3554 }
c445477d 3555
a31196b0 3556 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
fec5e652
TH
3557 *rflowp = rflow;
3558 cpu = tcpu;
3559 goto done;
3560 }
3561 }
3562
567e4b79
ED
3563try_rps:
3564
0a9627f2 3565 if (map) {
8fc54f68 3566 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
0a9627f2
TH
3567 if (cpu_online(tcpu)) {
3568 cpu = tcpu;
3569 goto done;
3570 }
3571 }
3572
3573done:
0a9627f2
TH
3574 return cpu;
3575}
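
/*
 * Illustrative note (not kernel code): each rps_sock_flow_table entry packs
 * the desired CPU into the low bits and the upper bits of the flow hash into
 * the rest, which is what the "(ident ^ hash) & ~rps_cpu_mask" test above
 * relies on. RPS/RFS are typically enabled from userspace, e.g.:
 *
 *	echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *	echo 4096  > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *	echo f     > /sys/class/net/eth0/queues/rx-0/rps_cpus
 *
 * The device name and the sizes are examples only; see
 * Documentation/networking/scaling.txt for the real tuning guidance.
 */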

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	unsigned int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
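
/*
 * Hedged usage sketch (hypothetical driver code, not part of this file):
 * a driver's periodic filter-expiry scan might use the helper above like
 * this, where my_filter_tbl, my_nfilters and my_remove_hw_filter() are
 * made-up names:
 *
 *	for (i = 0; i < my_nfilters; i++) {
 *		struct my_filter *f = &my_filter_tbl[i];
 *
 *		if (f->in_use &&
 *		    rps_may_expire_flow(dev, f->rxq_index,
 *					f->flow_id, f->filter_id))
 *			my_remove_hw_filter(dev, f);
 *	}
 */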

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure belongs to another CPU.
 * If so, queue it on our IPI list and return 1; otherwise return 0.
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}
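
/*
 * How the flow-limit heuristic above plays out (illustration only): the
 * sliding history window records the hash bucket of the most recent
 * FLOW_LIMIT_HISTORY enqueued packets. Once a single bucket accounts for
 * more than half the window while the backlog is at least half full, that
 * flow's packets are dropped; e.g. with FLOW_LIMIT_HISTORY of 128 a flow
 * is throttled once it contributes 65 of the last 128 enqueues. The
 * feature is enabled per CPU via /proc/sys/net/core/flow_limit_cpu_bitmap.
 */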

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	if (!netif_running(skb->dev))
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for the backlog device.
		 * We can use a non-atomic operation since we own the queue lock.
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

drop:
	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}

/**
 * netif_rx - post buffer to the network code
 * @skb: buffer to post
 *
 * This function receives a packet from a device driver and queues it for
 * the upper (protocol) levels to process. It always succeeds. The buffer
 * may be dropped during processing for congestion control or by the
 * protocol layers.
 *
 * return values:
 * NET_RX_SUCCESS (no congestion)
 * NET_RX_DROP    (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);
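
/*
 * Hedged usage sketch (hypothetical driver, not part of this file): a
 * simple non-NAPI driver hands a received frame to the stack like this;
 * my_irq_handler() and the rx_buf/rx_len names are made up:
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, rx_len);
 *
 *		if (!skb)
 *			return IRQ_HANDLED;	   (frame dropped)
 *		memcpy(skb_put(skb, rx_len), rx_buf, rx_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);			   (queue to the backlog)
 *		return IRQ_HANDLED;
 *	}
 */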

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_atomic();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_atomic();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
	(defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

static inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
		   struct net_device *orig_dev)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list);
	struct tcf_result cl_res;

	/* If there's at least one ingress present somewhere (so
	 * we get here via enabled static key), remaining devices
	 * that are not configured with an ingress qdisc will bail
	 * out here.
	 */
	if (!cl)
		return skb;
	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	qdisc_skb_cb(skb)->pkt_len = skb->len;
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
	qdisc_bstats_cpu_update(cl->q, skb);

	switch (tc_classify(skb, cl, &cl_res, false)) {
	case TC_ACT_OK:
	case TC_ACT_RECLASSIFY:
		skb->tc_index = TC_H_MIN(cl_res.classid);
		break;
	case TC_ACT_SHOT:
		qdisc_qstats_cpu_drop(cl->q);
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
		kfree_skb(skb);
		return NULL;
	case TC_ACT_REDIRECT:
		/* skb_mac_header check was done by cls/act_bpf, so
		 * we can safely push the L2 header back before
		 * redirecting to another netdev
		 */
		__skb_push(skb, skb->mac_len);
		skb_do_redirect(skb);
		return NULL;
	default:
		break;
	}
#endif /* CONFIG_NET_CLS_ACT */
	return skb;
}
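
/*
 * For reference (userspace, illustration only): the classification path
 * above is taken once an ingress qdisc plus a classifier are attached to
 * a device, for example:
 *
 *	tc qdisc add dev eth0 handle ffff: ingress
 *	tc filter add dev eth0 parent ffff: bpf obj prog.o sec classifier
 *
 * "eth0" and "prog.o" are placeholders; any classifier (u32, bpf, ...)
 * ends up in the tc_classify() call above.
 */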

/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
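
/*
 * Hedged usage sketch (hypothetical upper driver, along the lines of what
 * bridge or macvlan do; my_port, my_wants_frame() and my_enqueue() are
 * made-up names):
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct my_port *port =
 *			rcu_dereference(skb->dev->rx_handler_data);
 *
 *		if (!my_wants_frame(port, skb))
 *			return RX_HANDLER_PASS;	   (normal delivery continues)
 *		my_enqueue(port, skb);
 *		return RX_HANDLER_CONSUMED;	   (skb now owned by us)
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(lower_dev, my_handle_frame, port);
 *	rtnl_unlock();
 */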

/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{

	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
	 * section is guaranteed to also see a non-NULL rx_handler_data.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static inline int nf_ingress(struct sk_buff *skb, struct packet_type **pt_prev,
			     int *ret, struct net_device *orig_dev)
{
#ifdef CONFIG_NETFILTER_INGRESS
	if (nf_hook_ingress_active(skb)) {
		if (*pt_prev) {
			*ret = deliver_skb(skb, *pt_prev, orig_dev);
			*pt_prev = NULL;
		}

		return nf_hook_ingress(skb);
	}
#endif /* CONFIG_NETFILTER_INGRESS */
	return 0;
}

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto out;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_key_false(&ingress_needed)) {
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
ncls:
#endif
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		if (skb_vlan_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	return ret;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

static int netif_receive_skb_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

	rcu_read_lock();

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
	}
#endif
	ret = __netif_receive_skb(skb);
	rcu_read_unlock();
	return ret;
}

/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
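
/*
 * Hedged contrast note with a sketch (hypothetical NAPI driver without
 * GRO; my_next_rx_skb() is a made-up helper): unlike netif_rx(), which
 * merely queues to the per-CPU backlog and is safe from hard interrupt
 * context, netif_receive_skb() processes the packet inline and so is
 * normally called from a driver's poll routine:
 *
 *	while (work < budget && (skb = my_next_rx_skb(ring))) {
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		netif_receive_skb(skb);	   (runs in softirq context)
 *		work++;
 *	}
 */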

/* Network device is going away, flush any packets still pending.
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}

/* napi->gro_list contains packets ordered by age, with the
 * youngest packets at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		diffs |= skb_metadata_dst_cmp(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->udp_mark = 0;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
			skb_dst_drop(skb);
			kmem_cache_free(skbuff_head_cache, skb);
		} else {
			__kfree_skb(skb);
		}
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
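
/*
 * Hedged usage sketch (hypothetical NAPI driver): inside a poll routine,
 * freshly built skbs are normally fed through GRO rather than straight
 * into netif_receive_skb(); my_build_rx_skb() and the ring names are
 * made up:
 *
 *	while (work < budget && (skb = my_build_rx_skb(ring))) {
 *		skb->protocol = eth_type_trans(skb, netdev);
 *		napi_gro_receive(&ring->napi, skb);
 *		work++;
 *	}
 */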

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
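
/*
 * Hedged usage sketch (hypothetical page-based driver): the frag API pairs
 * napi_get_frags() with napi_gro_frags(); the page/offset/len names are
 * made up:
 *
 *	struct sk_buff *skb = napi_get_frags(napi);
 *
 *	if (!skb)
 *		return;			   (out of memory, leave frame in ring)
 *	skb_fill_page_desc(skb, 0, page, offset, len);
 *	skb->len += len;
 *	skb->data_len += len;
 *	skb->truesize += PAGE_SIZE;
 *	napi_gro_frags(napi);		   (consumes or recycles napi->skb)
 */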

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);

/*
 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPIs to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				smp_call_function_single_async(remsd->cpu,
							       &remsd->csd);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	return sd->rps_ipi_list != NULL;
#else
	return false;
#endif
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

	/* Check for pending IPIs; it is better to send them now
	 * than to wait until net_rx_action() ends.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = weight_p;
	local_irq_disable();
	while (1) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			rcu_read_lock();
			local_irq_enable();
			__netif_receive_skb(skb);
			rcu_read_unlock();
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			napi->state = 0;
			rps_unlock(sd);

			break;
		}

		skb_queue_splice_tail_init(&sd->input_pkt_queue,
					   &sd->process_queue);
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));

	list_del_init(&n->poll_list);
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete_done(struct napi_struct *n, int work_done)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	if (n->gro_list) {
		unsigned long timeout = 0;

		if (work_done)
			timeout = n->dev->gro_flush_timeout;

		if (timeout)
			hrtimer_start(&n->timer, ns_to_ktime(timeout),
				      HRTIMER_MODE_REL_PINNED);
		else
			napi_gro_flush(n, false);
	}
	if (likely(list_empty(&n->poll_list))) {
		WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
	} else {
		/* If n->poll_list is not empty, we need to mask irqs */
		local_irq_save(flags);
		__napi_complete(n);
		local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(napi_complete_done);
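
/*
 * Hedged usage sketch (hypothetical driver poll routine): a NAPI poll
 * function processes at most @budget packets and calls napi_complete_done()
 * only when it consumed less than the budget; my_clean_rx() and
 * my_enable_rx_irq() are made-up names:
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = my_clean_rx(napi, budget);
 *
 *		if (work_done < budget) {
 *			napi_complete_done(napi, work_done);
 *			my_enable_rx_irq(napi);	   (re-arm the interrupt)
 *		}
 *		return work_done;
 *	}
 */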

/* must be called under rcu_read_lock(), as we don't take a reference */
static struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}

#if defined(CONFIG_NET_RX_BUSY_POLL)
#define BUSY_POLL_BUDGET 8
bool sk_busy_loop(struct sock *sk, int nonblock)
{
	unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
	int (*busy_poll)(struct napi_struct *dev);
	struct napi_struct *napi;
	int rc = false;

	rcu_read_lock();

	napi = napi_by_id(sk->sk_napi_id);
	if (!napi)
		goto out;

	/* Note: ndo_busy_poll method is optional in linux-4.5 */
	busy_poll = napi->dev->netdev_ops->ndo_busy_poll;

	do {
		rc = 0;
		local_bh_disable();
		if (busy_poll) {
			rc = busy_poll(napi);
		} else if (napi_schedule_prep(napi)) {
			void *have = netpoll_poll_lock(napi);

			if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
				rc = napi->poll(napi, BUSY_POLL_BUDGET);
				trace_napi_poll(napi);
				if (rc == BUSY_POLL_BUDGET) {
					napi_complete_done(napi, rc);
					napi_schedule(napi);
				}
			}
			netpoll_poll_unlock(have);
		}
		if (rc > 0)
			NET_ADD_STATS_BH(sock_net(sk),
					 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
		local_bh_enable();

		if (rc == LL_FLUSH_FAILED)
			break; /* permanent failure */

		cpu_relax();
	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
		 !need_resched() && !busy_loop_timeout(end_time));

	rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
	rcu_read_unlock();
	return rc;
}
EXPORT_SYMBOL(sk_busy_loop);

#endif /* CONFIG_NET_RX_BUSY_POLL */
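
/*
 * For reference (illustration only): busy polling is opted into either
 * globally via the net.core.busy_read / net.core.busy_poll sysctls or per
 * socket, e.g. from userspace:
 *
 *	int usecs = 50;
 *	setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, &usecs, sizeof(usecs));
 *
 * The 50-microsecond value is an arbitrary example.
 */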

void napi_hash_add(struct napi_struct *napi)
{
	if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
	    test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
		return;

	spin_lock(&napi_hash_lock);

	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
	do {
		if (unlikely(++napi_gen_id < NR_CPUS + 1))
			napi_gen_id = NR_CPUS + 1;
	} while (napi_by_id(napi_gen_id));
	napi->napi_id = napi_gen_id;

	hlist_add_head_rcu(&napi->napi_hash_node,
			   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

	spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_add);

/* Warning: the caller is responsible for making sure an RCU grace period
 * is respected before freeing the memory containing @napi.
 */
bool napi_hash_del(struct napi_struct *napi)
{
	bool rcu_sync_needed = false;

	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
		rcu_sync_needed = true;
		hlist_del_rcu(&napi->napi_hash_node);
	}
	spin_unlock(&napi_hash_lock);
	return rcu_sync_needed;
}
EXPORT_SYMBOL_GPL(napi_hash_del);

static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
	struct napi_struct *napi;

	napi = container_of(timer, struct napi_struct, timer);
	if (napi->gro_list)
		napi_schedule(napi);

	return HRTIMER_NORESTART;
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
	napi_hash_add(napi);
}
EXPORT_SYMBOL(netif_napi_add);
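
/*
 * Hedged wiring sketch (hypothetical driver): a typical life cycle
 * registers the NAPI context at probe time, enables it in ndo_open and
 * schedules it from the RX interrupt; all my_* and priv names are made up:
 *
 *	netif_napi_add(netdev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&priv->napi);		   (in ndo_open)
 *	...
 *	if (my_rx_irq_pending(priv)) {		   (in the irq handler)
 *		my_disable_rx_irq(priv);
 *		napi_schedule(&priv->napi);
 *	}
 */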

void napi_disable(struct napi_struct *n)
{
	might_sleep();
	set_bit(NAPI_STATE_DISABLE, &n->state);

	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	while (test_and_set_bit(NAPI_STATE_NPSVC, &n->state))
		msleep(1);

	hrtimer_cancel(&n->timer);

	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
EXPORT_SYMBOL(napi_disable);

/* Must be called in process context */
void netif_napi_del(struct napi_struct *napi)
{
	might_sleep();
	if (napi_hash_del(napi))
		synchronize_net();
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight);
		trace_napi_poll(n);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) {
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				return;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll);

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows an average
		 * latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 ||
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list))
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
}

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to the specified upper device and return
 * true in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_adj(upper_dev, &dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->all_adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
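
/*
 * Hedged usage sketch (illustration only): walking the immediate upper
 * devices with the iterator above; "dev" is any net_device:
 *
 *	struct net_device *upper;
 *	struct list_head *iter = &dev->adj_list.upper;
 *
 *	rcu_read_lock();
 *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
 *		pr_info("upper of %s: %s\n", dev->name, upper->name);
 *	rcu_read_unlock();
 */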

/**
 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->all_adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold either the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);
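
/* Illustrative sketch (editor's addition): iterating a master's lower
 * (slave) devices under RTNL. netdev_for_each_lower_dev() is the wrapper
 * used elsewhere in this file; the helper name below is hypothetical.
 */
static unsigned int example_count_lowers(struct net_device *master)
{
	struct net_device *lower;
	struct list_head *iter;
	unsigned int n = 0;

	ASSERT_RTNL();
	netdev_for_each_lower_dev(master, lower, iter)
		n++;
	return n;
}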

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return a pointer to it, or NULL if
 * there is none. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (adj) {
		adj->ref_nr++;
		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(adj_dev, dev_list);

	if (!adj) {
		pr_err("tried to remove device %s from %s\n",
		       dev->name, adj_dev->name);
		BUG();
	}

	if (adj->ref_nr > 1) {
		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
			 adj->ref_nr-1);
		adj->ref_nr--;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
					   master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
					   false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
		return ret;
	}

	return 0;
}

static int __netdev_adjacent_dev_link(struct net_device *dev,
				      struct net_device *upper_dev)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->all_adj_list.upper,
						&upper_dev->all_adj_list.lower,
						NULL, false);
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
}

static void __netdev_adjacent_dev_unlink(struct net_device *dev,
					 struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->all_adj_list.upper,
					   &upper_dev->all_adj_list.lower);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	int ret = __netdev_adjacent_dev_link(dev, upper_dev);

	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
					       &dev->adj_list.upper,
					       &upper_dev->adj_list.lower,
					       private, master);
	if (ret) {
		__netdev_adjacent_dev_unlink(dev, upper_dev);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink(dev, upper_dev);
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *upper_priv, void *upper_info)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	struct netdev_adjacent *i, *j, *to_i, *to_j;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_find_adj(dev, &upper_dev->all_adj_list.upper))
		return -EBUSY;

	if (__netdev_find_adj(upper_dev, &dev->adj_list.upper))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = master;
	changeupper_info.linking = true;
	changeupper_info.upper_info = upper_info;

	ret = call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, upper_priv,
						   master);
	if (ret)
		return ret;

	/* Now that we linked these devs, make all the upper_dev's
	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
	 * vice versa, and don't forget the devices themselves. All of these
	 * links are non-neighbours.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			pr_debug("Interlinking %s with %s, non-neighbour\n",
				 i->dev->name, j->dev->name);
			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
			if (ret)
				goto rollback_mesh;
		}
	}

	/* add dev to every upper_dev's upper device */
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		pr_debug("linking %s's upper device %s with %s\n",
			 upper_dev->name, i->dev->name, dev->name);
		ret = __netdev_adjacent_dev_link(dev, i->dev);
		if (ret)
			goto rollback_upper_mesh;
	}

	/* add upper_dev to every dev's lower device */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		pr_debug("linking %s's lower device %s with %s\n", dev->name,
			 i->dev->name, upper_dev->name);
		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
		if (ret)
			goto rollback_lower_mesh;
	}

	ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
					    &changeupper_info.info);
	ret = notifier_to_errno(ret);
	if (ret)
		goto rollback_lower_mesh;

	return 0;

rollback_lower_mesh:
	to_i = i;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
	}

	i = NULL;

rollback_upper_mesh:
	to_i = i;
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(dev, i->dev);
	}

	i = j = NULL;

rollback_mesh:
	to_i = i;
	to_j = j;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			if (i == to_i && j == to_j)
				break;
			__netdev_adjacent_dev_unlink(i->dev, j->dev);
		}
		if (i == to_i)
			break;
	}

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to a device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 * @upper_priv: upper device private
 * @upper_info: upper info to be passed down via notifier
 *
 * Adds a link to a device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev,
				 void *upper_priv, void *upper_info)
{
	return __netdev_upper_dev_link(dev, upper_dev, true,
				       upper_priv, upper_info);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to a device which is upper to this one. The caller must
 * hold the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_notifier_changeupper_info changeupper_info;
	struct netdev_adjacent *i, *j;

	ASSERT_RTNL();

	changeupper_info.upper_dev = upper_dev;
	changeupper_info.master = netdev_master_upper_dev_get(dev) == upper_dev;
	changeupper_info.linking = false;

	call_netdevice_notifiers_info(NETDEV_PRECHANGEUPPER, dev,
				      &changeupper_info.info);

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	/* Here is the tricky part. We must remove all dev's lower
	 * devices from all upper_dev's upper devices and vice
	 * versa, to maintain the graph relationship.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
			__netdev_adjacent_dev_unlink(i->dev, j->dev);

	/* also remove the devices themselves from the lower/upper device
	 * lists
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);

	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
		__netdev_adjacent_dev_unlink(dev, i->dev);

	call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
				      &changeupper_info.info);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
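
/* Illustrative sketch (editor's addition): how a master-type driver might
 * pair the link/unlink calls above when enslaving and releasing a port.
 * The function names are hypothetical; RTNL is required as documented.
 */
static int example_enslave(struct net_device *master, struct net_device *port,
			   void *port_priv)
{
	int err;

	ASSERT_RTNL();
	err = netdev_master_upper_dev_link(port, master, port_priv, NULL);
	if (err)
		return err;	/* e.g. -EBUSY if port already has a master */
	return 0;
}

static void example_release(struct net_device *master, struct net_device *port)
{
	ASSERT_RTNL();
	netdev_upper_dev_unlink(port, master);
}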

/**
 * netdev_bonding_info_change - Dispatch event about slave change
 * @dev: device
 * @bonding_info: info to dispatch
 *
 * Send NETDEV_BONDING_INFO to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_bonding_info_change(struct net_device *dev,
				struct netdev_bonding_info *bonding_info)
{
	struct netdev_notifier_bonding_info info;

	memcpy(&info.bonding_info, bonding_info,
	       sizeof(struct netdev_bonding_info));
	call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
				      &info.info);
}
EXPORT_SYMBOL(netdev_bonding_info_change);

static void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

static void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);
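
/* Illustrative sketch (editor's addition): the private pointer returned
 * above is the upper_priv that was passed to netdev_master_upper_dev_link().
 * "struct example_port" and the helper are hypothetical.
 */
struct example_port {
	int id;	/* whatever per-port state the master keeps */
};

static struct example_port *example_port_get(struct net_device *master,
					     struct net_device *port)
{
	return netdev_lower_dev_get_private(master, port);
}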
5945
4085ebe8
VY
5946
5947int dev_get_nest_level(struct net_device *dev,
b618aaa9 5948 bool (*type_check)(const struct net_device *dev))
4085ebe8
VY
5949{
5950 struct net_device *lower = NULL;
5951 struct list_head *iter;
5952 int max_nest = -1;
5953 int nest;
5954
5955 ASSERT_RTNL();
5956
5957 netdev_for_each_lower_dev(dev, lower, iter) {
5958 nest = dev_get_nest_level(lower, type_check);
5959 if (max_nest < nest)
5960 max_nest = nest;
5961 }
5962
5963 if (type_check(dev))
5964 max_nest++;
5965
5966 return max_nest;
5967}
5968EXPORT_SYMBOL(dev_get_nest_level);
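
/* Illustrative sketch (editor's addition): a stacked-device driver could use
 * the recursion above to derive a lockdep subclass, much as the VLAN code
 * does. The predicate and helper below are hypothetical examples only.
 */
static bool example_is_stacked(const struct net_device *dev)
{
	return dev->priv_flags & IFF_MACVLAN;	/* assumed device type */
}

static int example_nest_level(struct net_device *dev)
{
	ASSERT_RTNL();
	return dev_get_nest_level(dev, example_is_stacked);
}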

/**
 * netdev_lower_state_changed - Dispatch event about lower device state change
 * @lower_dev: device
 * @lower_state_info: state to dispatch
 *
 * Send NETDEV_CHANGELOWERSTATE to netdev notifiers with info.
 * The caller must hold the RTNL lock.
 */
void netdev_lower_state_changed(struct net_device *lower_dev,
				void *lower_state_info)
{
	struct netdev_notifier_changelowerstate_info changelowerstate_info;

	ASSERT_RTNL();
	changelowerstate_info.lower_state_info = lower_state_info;
	call_netdevice_notifiers_info(NETDEV_CHANGELOWERSTATE, lower_dev,
				      &changelowerstate_info.info);
}
EXPORT_SYMBOL(netdev_lower_state_changed);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
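
/* Illustrative sketch (editor's addition): a packet-capture style user of
 * the counted promiscuity API. Names are hypothetical; note the paired
 * +1/-1 increments so nested users don't stomp on each other.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* count up */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* count down */
	rtnl_unlock();
}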

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts to normal
 * filtering operation. A negative @inc value is used to drop the counter
 * when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP)
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, without asking us and without reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = changes;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on a device based on the supplied state flags. The
 * flags are in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
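
/* Illustrative sketch (editor's addition): administratively bringing an
 * interface up via the flags API, roughly what "ip link set dev X up"
 * triggers. The helper name is hypothetical.
 */
static int example_if_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}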

static int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
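
/* Illustrative sketch (editor's addition): an encapsulating device clamping
 * its MTU to leave room for its header. EXAMPLE_TUN_HLEN and the helper are
 * hypothetical; RTNL must be held, as the notifiers above require.
 */
#define EXAMPLE_TUN_HLEN 28	/* assumed per-packet overhead */

static int example_sync_mtu(struct net_device *tun, struct net_device *lower)
{
	ASSERT_RTNL();
	return dev_set_mtu(tun, lower->mtu - EXAMPLE_TUN_HLEN);
}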

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
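
/* Illustrative sketch (editor's addition): assigning a random unicast MAC
 * through the checked path above rather than writing dev_addr directly.
 * eth_random_addr() is from <linux/etherdevice.h>; the helper is made up.
 */
static int example_set_random_mac(struct net_device *dev)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;	/* must match, see -EINVAL above */
	eth_random_addr((u8 *)sa.sa_data);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}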

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_item_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_get_phys_port_name - Get device physical port name
 * @dev: device
 * @name: port name
 * @len: limit of bytes to copy to name
 *
 * Get device physical port name
 */
int dev_get_phys_port_name(struct net_device *dev,
			   char *name, size_t len)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_name)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_name(dev, name, len);
}
EXPORT_SYMBOL(dev_get_phys_port_name);

/**
 * dev_change_proto_down - update protocol port state information
 * @dev: device
 * @proto_down: new value
 *
 * This info can be used by switch drivers to set the phys state of the
 * port.
 */
int dev_change_proto_down(struct net_device *dev, bool proto_down)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_proto_down)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_proto_down(dev, proto_down);
}
EXPORT_SYMBOL(dev_change_proto_down);

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;

	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call us without ever having registered,
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head, true);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
		on_each_cpu(flush_backlog, dev, 1);
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		struct sk_buff *skb = NULL;

		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols that we are about to destroy this device.
		 * They should clean up all of their state.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
						     GFP_KERNEL);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (skb)
			rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);

		/* Notifier chain MUST detach us all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}

static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
	struct net_device *upper, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(&upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(upper->wanted_features & feature)
		    && (features & feature)) {
			netdev_dbg(lower, "Dropping feature %pNF, upper dev %s has it off.\n",
				   &feature, upper->name);
			features &= ~feature;
		}
	}

	return features;
}

static void netdev_sync_lower_features(struct net_device *upper,
	struct net_device *lower, netdev_features_t features)
{
	netdev_features_t upper_disables = NETIF_F_UPPER_DISABLES;
	netdev_features_t feature;
	int feature_bit;

	for_each_netdev_feature(&upper_disables, feature_bit) {
		feature = __NETIF_F_BIT(feature_bit);
		if (!(features & feature) && (lower->features & feature)) {
			netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
				   &feature, lower->name);
			lower->wanted_features &= ~feature;
			netdev_update_features(lower);

			if (unlikely(lower->features & feature))
				netdev_WARN(upper, "failed to disable %pNF on %s!\n",
					    &feature, lower->name);
		}
	}
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!(features & NETIF_F_HW_CSUM) &&
		    ((features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) !=
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				   "Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				   "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (dev->netdev_ops->ndo_busy_poll)
		features |= NETIF_F_BUSY_POLL;
	else
#endif
		features &= ~NETIF_F_BUSY_POLL;

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	struct net_device *upper, *lower;
	netdev_features_t features;
	struct list_head *iter;
	int err = -1;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	/* some features can't be enabled if they're off on an upper device */
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		features = netdev_sync_upper_features(dev, upper, features);

	if (dev->features == features)
		goto sync_lower;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		   &dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);
	else
		err = 0;

	if (unlikely(err < 0)) {
		netdev_err(dev,
			   "set_features() failed (%d); wanted %pNF, left %pNF\n",
			   err, &features, &dev->features);
		/* return non-0 since some features might have changed and
		 * it's better to fire a spurious notification than miss it
		 */
		return -1;
	}

sync_lower:
	/* some features must be disabled on lower devices when disabled
	 * on an upper device (think: bonding master or bridge)
	 */
	netdev_for_each_lower_dev(dev, lower, iter)
		netdev_sync_lower_features(dev, lower, features);

	if (!err)
		dev->features = features;

	return err < 0 ? 0 : 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);
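
/* Illustrative sketch (editor's addition): a driver reacting to a runtime
 * capability change by adjusting hw_features and letting the core
 * recompute. The trigger and names are hypothetical; RTNL must be held.
 */
static void example_csum_offload_changed(struct net_device *dev, bool avail)
{
	ASSERT_RTNL();
	if (avail)
		dev->hw_features |= NETIF_F_IP_CSUM;
	else
		dev->hw_features &= ~NETIF_F_IP_CSUM;
	netdev_update_features(dev);	/* re-runs the fixups above */
}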

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

#ifdef CONFIG_SYSFS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;
	size_t sz = count * sizeof(*rx);

	BUG_ON(count < 1);

	rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!rx) {
		rx = vzalloc(sz);
		if (!rx)
			return -ENOMEM;
	}
	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	if (count < 1 || count > 0xffff)
		return -EINVAL;

	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!tx) {
		tx = vzalloc(sz);
		if (!tx)
			return -ENOMEM;
	}
	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		netif_tx_stop_queue(txq);
	}
}
EXPORT_SYMBOL(netif_tx_stop_all_queues);

/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_devices are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
	 */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices.
	 */
	dev->hw_enc_features |= NETIF_F_SG;

	/* Make NETIF_F_SG inheritable to MPLS.
	 */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 * Default initial state at registry is that the
	 * device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
d1b19dff 7037EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note: We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);
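
/*
 * Example usage (a minimal sketch, not part of dev.c): a hypothetical
 * driver "foo" that multiplexes several hardware channels onto one NAPI
 * context hung off a never-registered dummy netdev, which is exactly the
 * case init_dummy_netdev() serves. All foo_* identifiers are assumptions
 * made for illustration.
 */
struct foo_adapter {
	struct net_device napi_dev;	/* dummy device, never registered */
	struct napi_struct napi;
};

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* service all of the adapter's hardware channels here */
	if (work_done < budget)
		napi_complete(napi);
	return work_done;
}

static void foo_setup_napi(struct foo_adapter *ad)
{
	init_dummy_netdev(&ad->napi_dev);
	netif_napi_add(&ad->napi_dev, &ad->napi, foo_poll, NAPI_POLL_WEIGHT);
	napi_enable(&ad->napi);
}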

/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
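
/*
 * Example usage (a minimal sketch, not part of dev.c): the typical driver
 * probe flow around register_netdev(). The bar_* identifiers are
 * hypothetical; a real net_device_ops would at least provide .ndo_open
 * and .ndo_start_xmit.
 */
struct bar_priv {
	int link;			/* placeholder private state */
};

static const struct net_device_ops bar_netdev_ops = {
	/* .ndo_open, .ndo_start_xmit, ... filled in by a real driver */
};

static int bar_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct bar_priv));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &bar_netdev_ops;

	err = register_netdev(dev);	/* takes rtnl_lock internally */
	if (err) {
		free_netdev(dev);	/* legal while still unregistered */
		return err;
	}
	return 0;
}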

int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);
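
/*
 * Example usage (a minimal sketch, not part of dev.c): the per-cpu
 * counters summed above pair with dev_hold()/dev_put(). baz_* is a
 * hypothetical holder of a device reference.
 */
static void baz_take_and_release(struct net_device *dev)
{
	dev_hold(dev);	/* bumps this CPU's slot of dev->pcpu_refcnt */
	/* ... use dev without fear of it being freed ... */
	dev_put(dev);	/* netdev_refcnt_read() sums all the slots */
}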

/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		rtnl_lock();
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
		__rtnl_unlock();

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		BUG_ON(!list_empty(&dev->ptype_all));
		BUG_ON(!list_empty(&dev->ptype_specific));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
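
/*
 * Example usage (a minimal sketch, not part of dev.c): a driver-side
 * ndo_get_stats64 that fills @stats from driver-private counters. The
 * qux_* identifiers are hypothetical, and the hook's signature matches
 * this era of the tree, where it returns the storage pointer.
 */
struct qux_priv {
	u64 rx_packets;
	u64 tx_packets;
};

static struct rtnl_link_stats64 *qux_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct qux_priv *priv = netdev_priv(dev);

	/* dev_get_stats() has already zeroed @stats for us */
	stats->rx_packets = priv->rx_packets;
	stats->tx_packets = priv->tx_packets;
	return stats;
}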

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
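
/*
 * Example usage (a minimal sketch, not part of dev.c): a mid-layer core
 * installing fallback ethtool_ops for all of its minidrivers. The helper
 * above only takes effect while the device still carries the core's empty
 * default ops, so a minidriver's own ops are never clobbered. The quux_*
 * identifiers are hypothetical.
 */
static const struct ethtool_ops quux_default_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
};

static void quux_attach(struct net_device *dev)
{
	netdev_set_default_ethtool_ops(dev, &quux_default_ethtool_ops);
}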

void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_SYSFS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!p)
		p = vzalloc(alloc_size);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->gso_min_segs = 0;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->all_adj_list.upper);
	INIT_LIST_HEAD(&dev->all_adj_list.lower);
	INIT_LIST_HEAD(&dev->ptype_all);
	INIT_LIST_HEAD(&dev->ptype_specific);
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	if (!dev->tx_queue_len)
		dev->priv_flags |= IFF_NO_QUEUE;

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_SYSFS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;

	nf_hook_ingress_init(dev);

	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
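
/*
 * Example usage (a minimal sketch, not part of dev.c): allocating a
 * multiqueue device directly via alloc_netdev_mqs(). The corge_*
 * identifiers, the queue counts, and the name template are assumptions;
 * most Ethernet drivers use the alloc_etherdev_mq() wrapper instead.
 */
struct corge_priv {
	int id;
};

static void corge_setup(struct net_device *dev)
{
	ether_setup(dev);	/* generic Ethernet defaults */
}

static struct net_device *corge_alloc(void)
{
	/* 8 TX and 8 RX subqueues; NET_NAME_ENUM marks the "corge%d"
	 * name as kernel-enumerated at registration time.
	 */
	return alloc_netdev_mqs(sizeof(struct corge_priv), "corge%d",
				NET_NAME_ENUM, corge_setup, 8, 8);
}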

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 * Must be called in process context.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	might_sleep();
	netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
	kvfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);
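
/*
 * Example usage (a minimal sketch, not part of dev.c): a common quiesce
 * pattern around synchronize_net(). Because receive processing runs under
 * rcu_read_lock(), any RX path that observed the old flag value has
 * finished once synchronize_net() returns. The grault_* identifiers are
 * hypothetical, and assume the RX path uses READ_ONCE() on the flag.
 */
struct grault_priv {
	bool rx_enabled;
};

static void grault_quiesce(struct net_device *dev)
{
	struct grault_priv *priv = netdev_priv(dev);

	WRITE_ONCE(priv->rx_enabled, false);
	synchronize_net();	/* in-flight RX paths have now drained */
	/* safe to tear down RX resources here */
}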

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 *
 * Note: As most callers use a stack allocated list_head,
 * we force a list_del() to make sure stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
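
/*
 * Example usage (a minimal sketch, not part of dev.c): batching several
 * unregistrations under a single rtnl_lock()/rtnl_unlock() cycle, the
 * pattern rtnl_link dellink handlers rely on. The garply_* identifiers
 * and the devs array are assumptions made for illustration.
 */
static void garply_unregister_all(struct net_device **devs, int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);	/* empties kill_list */
	rtnl_unlock();	/* netdev_run_todo() finishes the teardown */
}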

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);
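
/*
 * Example usage (a minimal sketch, not part of dev.c): the typical driver
 * remove path. unregister_netdev() does not return until the refcount has
 * dropped to zero (see netdev_wait_allrefs() above), so free_netdev() is
 * safe immediately afterwards. waldo_remove is a hypothetical name.
 */
static void waldo_remove(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes rtnl_lock internally */
	free_netdev(dev);	/* safe: all references are gone */
}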

/**
 * dev_change_net_namespace - move device to a different network namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */

int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols, that we are about to destroy
	   this device. They should clean all the things.

	   Note that dev->reg_state stays at NETREG_REGISTERED.
	   This is wanted because this way 8021q and macvlan know
	   the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex))
		dev->ifindex = dev_new_index(net);

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);
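
/*
 * Example usage (a minimal sketch, not part of dev.c): moving a device
 * into another namespace under rtnl, falling back to an "eth%d" pattern
 * when the current name collides in the target namespace, mirroring what
 * the RTM_NEWLINK handler does. fred_move is a hypothetical name.
 */
static int fred_move(struct net_device *dev, struct net *target)
{
	int err;

	rtnl_lock();	/* dev_change_net_namespace() requires rtnl */
	err = dev_change_net_namespace(dev, target, "eth%d");
	rtnl_unlock();
	return err;
}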

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU, with one exception:
	 * process_backlog() must be called by the CPU owning the percpu
	 * backlog. We properly handle process_queue & input_pkt_queue later.
	 */
	while (!list_empty(&oldsd->poll_list)) {
		struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
							    struct napi_struct,
							    poll_list);

		list_del_init(&napi->poll_list);
		if (napi->poll == process_backlog)
			napi->state = 0;
		else
			____napi_schedule(sd, napi);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_ni(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}

/**
 * netdev_increment_features - increment feature set by one
 * @all: current feature set
 * @one: new feature set
 * @mask: mask feature set
 *
 * Computes a new feature set after adding a device with feature set
 * @one to the master device with current feature set @all. Will not
 * enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_HW_CSUM)
		mask |= NETIF_F_CSUM_MASK;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL | NETIF_F_CSUM_MASK) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_HW_CSUM)
		all &= ~(NETIF_F_CSUM_MASK & ~NETIF_F_HW_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
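
/*
 * Example usage (a minimal sketch, not part of dev.c): how an aggregating
 * driver (bonding-style) might fold each slave's features into the
 * master's, the pattern this helper serves. NETIF_F_ONE_FOR_ALL bits are
 * OR-accumulated, NETIF_F_ALL_FOR_ALL bits are AND-intersected. The
 * plugh_* identifiers, the starting value, and the mask are assumptions.
 */
static netdev_features_t plugh_compute_features(struct net_device **slaves,
						int n)
{
	netdev_features_t mask = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	netdev_features_t features = mask;	/* start from the full mask */
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     mask);
	return features;
}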

static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 * netdev_drivername - network driver for the device
 * @dev: network device
 *
 * Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)		\
{									\
	struct va_format vaf;						\
	va_list args;							\
									\
	va_start(args, fmt);						\
									\
	vaf.fmt = fmt;							\
	vaf.va = &args;							\
									\
	__netdev_printk(level, dev, &vaf);				\
									\
	va_end(args);							\
}									\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
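
/*
 * Example usage (a minimal sketch, not part of dev.c): the level helpers
 * generated above give driver messages a consistent "driver bus-id
 * ifname:" prefix. xyzzy_open and the link speed are hypothetical.
 */
static int xyzzy_open(struct net_device *dev)
{
	netdev_info(dev, "link up, %u Mb/s\n", 1000U);
	if (!netif_carrier_ok(dev))
		netdev_warn(dev, "opened without carrier\n");
	return 0;
}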

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmoveable devices (i.e. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);

	add_wait_queue(&netdev_unregistering_wq, &wait);
	for (;;) {
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();

		wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
	}
	remove_wait_queue(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace. Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network devices
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices: the first device that appears and the
	 * last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_subsys_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);