net: shrink struct softnet_data
net/core/dev.c
/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *	    Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *	    Michael Chastain	:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *	Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */
74
75#include <asm/uaccess.h>
1da177e4 76#include <linux/bitops.h>
4fc268d2 77#include <linux/capability.h>
1da177e4
LT
78#include <linux/cpu.h>
79#include <linux/types.h>
80#include <linux/kernel.h>
08e9897d 81#include <linux/hash.h>
5a0e3ad6 82#include <linux/slab.h>
1da177e4 83#include <linux/sched.h>
4a3e2f71 84#include <linux/mutex.h>
1da177e4
LT
85#include <linux/string.h>
86#include <linux/mm.h>
87#include <linux/socket.h>
88#include <linux/sockios.h>
89#include <linux/errno.h>
90#include <linux/interrupt.h>
91#include <linux/if_ether.h>
92#include <linux/netdevice.h>
93#include <linux/etherdevice.h>
0187bdfb 94#include <linux/ethtool.h>
1da177e4
LT
95#include <linux/notifier.h>
96#include <linux/skbuff.h>
457c4cbc 97#include <net/net_namespace.h>
1da177e4
LT
98#include <net/sock.h>
99#include <linux/rtnetlink.h>
1da177e4 100#include <linux/stat.h>
1da177e4
LT
101#include <net/dst.h>
102#include <net/pkt_sched.h>
103#include <net/checksum.h>
44540960 104#include <net/xfrm.h>
1da177e4
LT
105#include <linux/highmem.h>
106#include <linux/init.h>
1da177e4 107#include <linux/module.h>
1da177e4
LT
108#include <linux/netpoll.h>
109#include <linux/rcupdate.h>
110#include <linux/delay.h>
1da177e4 111#include <net/iw_handler.h>
1da177e4 112#include <asm/current.h>
5bdb9886 113#include <linux/audit.h>
db217334 114#include <linux/dmaengine.h>
f6a78bfc 115#include <linux/err.h>
c7fa9d18 116#include <linux/ctype.h>
723e98b7 117#include <linux/if_arp.h>
6de329e2 118#include <linux/if_vlan.h>
8f0f2223 119#include <linux/ip.h>
ad55dcaf 120#include <net/ip.h>
8f0f2223
DM
121#include <linux/ipv6.h>
122#include <linux/in.h>
b6b2fed1
DM
123#include <linux/jhash.h>
124#include <linux/random.h>
9cbc1cb8 125#include <trace/events/napi.h>
cf66ba58 126#include <trace/events/net.h>
07dc22e7 127#include <trace/events/skb.h>
5acbbd42 128#include <linux/pci.h>
caeda9b9 129#include <linux/inetdevice.h>
c445477d 130#include <linux/cpu_rmap.h>
c5905afb 131#include <linux/static_key.h>
af12fa6e 132#include <linux/hashtable.h>
60877a32 133#include <linux/vmalloc.h>
529d0489 134#include <linux/if_macvlan.h>
e7fd2885 135#include <linux/errqueue.h>
1da177e4 136
342709ef
PE
137#include "net-sysfs.h"
138
/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);
static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);
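
/*
 * Illustrative sketch (not part of the original file): a pure reader can
 * walk the device list under RCU instead of taking dev_base_lock, using
 * the same for_each_netdev_rcu() pattern this file uses elsewhere.  The
 * function name count_running_devs() is hypothetical.
 *
 *	static int count_running_devs(struct net *net)
 *	{
 *		struct net_device *dev;
 *		int n = 0;
 *
 *		rcu_read_lock();
 *		for_each_netdev_rcu(net, dev)
 *			if (dev->flags & IFF_UP)
 *				n++;
 *		rcu_read_unlock();
 *		return n;
 *	}
 */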

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0)
		;
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if a protocol handler that mangles packets
 *	is first on the list, it cannot sense that the packet
 *	is cloned and should be copied-on-write, so it will
 *	change it and subsequent readers will get a broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);
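
/*
 * Illustrative sketch (not from the original file): registering a handler
 * for one protocol with dev_add_pack().  The names "my_ptype" and "my_rcv"
 * are hypothetical.
 *
 *	static int my_rcv(struct sk_buff *skb, struct net_device *dev,
 *			  struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		consume_skb(skb);	(a real handler would process skb)
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_ptype __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = my_rcv,
 *	};
 *
 *	dev_add_pack(&my_ptype);
 *	(...)
 *	dev_remove_pack(&my_ptype);	(sleeps; see below)
 */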

/**
 *	__dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack	 - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);


/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep, therefore it cannot guarantee that
 *	all CPUs that are in the middle of receiving packets will see
 *	the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload	 - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPUs have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload	 - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
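
/*
 * Illustrative sketch (not from the original file): a minimal offload
 * registration.  "my_offload" and the callbacks are hypothetical; real
 * users (e.g. the IPv4 stack) supply GSO/GRO callbacks here.
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment  = my_gso_segment,
 *			.gro_receive  = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 */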

/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add	- add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine
 *	for all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check	- check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq = s[i].map.irq;
			dev->base_addr = s[i].map.base_addr;
			dev->mem_start = s[i].map.mem_start;
			dev->mem_end = s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base	- get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);
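
/*
 * Illustrative usage (an assumption based on the parser above, not from
 * the original file): up to four integers are parsed as irq, base_addr,
 * mem_start and mem_end, and the remainder is taken as the device name:
 *
 *	netdev=9,0x300,0xd0000,0xd4000,eth0
 */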

/*******************************************************************************

		Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu	- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name		- find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
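
/*
 * Illustrative sketch (not from the original file): the refcounted lookup
 * pairs with dev_put() once the caller is done with the device.
 *
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth0");
 *
 *	if (dev) {
 *		(... use dev ...)
 *		dev_put(dev);
 *	}
 */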

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns a pointer to the device,
 *	or %NULL if it is not found. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns a pointer to the
 *	device, or %NULL if it is not found.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking.
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

/**
 *	__dev_get_by_flags - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns a pointer to
 *	the first matching device, or %NULL if none is found. Must be called
 *	inside rtnl_lock(), and result refcount is unchanged.
 */

struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
				      unsigned short mask)
{
	struct net_device *dev, *ret;

	ASSERT_RTNL();

	ret = NULL;
	for_each_netdev(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(__dev_get_by_flags);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);
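
/*
 * Illustrative usage (not from the original file): passing a format string
 * lets the core pick the first free unit number.  The error label is
 * hypothetical.
 *
 *	ret = dev_alloc_name(dev, "eth%d");	(may set dev->name to "eth3")
 *	if (ret < 0)
 *		goto fail;
 */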

static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	unsigned char old_assign_type;
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

	if (oldname[0] && !strchr(oldname, '%'))
		netdev_info(dev, "renamed from %s\n", oldname);

	old_assign_type = dev->name_assign_type;
	dev->name_assign_type = NET_NAME_RENAMED;

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		dev->name_assign_type = old_assign_type;
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			dev->name_assign_type = old_assign_type;
			old_assign_type = NET_NAME_RENAMED;
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}


/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = 0;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 * netdev_notify_peers - notify network peers about existence of @dev
 * @dev: network device
 *
 * Generate traffic such that interested network peers are aware of
 * @dev, such as by generating a gratuitous ARP. This may be used when
 * a device wants to inform the rest of the network about some sort of
 * reconfiguration such as a failover event or virtual machine
 * migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open	- prepare an interface for use.
 *	@dev:	device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);
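
/*
 * Illustrative sketch (not from the original file): like the other
 * state-changing entry points, dev_open() expects the RTNL semaphore,
 * as the ASSERT_RTNL() in __dev_open() enforces.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */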

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		list_del_init(&dev->close_list);
	}

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	/* the same for macvlan devices */
	if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered, all registration and up events are replayed
 *	to the new notifier to allow the device to have a race-free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
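
/*
 * Illustrative sketch (not from the original file): a minimal notifier.
 * "my_netdev_event" and "my_nb" are hypothetical names.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_UP)
 *			pr_info("%s is up\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */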

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering, unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context.
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->protocol = eth_type_trans(skb, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

/**
 * dev_forward_skb - loopback an skb to another netif
 *
 * @dev: destination network device
 * @skb: buffer to forward
 *
 * return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped, but freed)
 *
 * dev_forward_skb can be used for injecting an skb from the
 * start_xmit function of one device into the receive queue
 * of another device.
 *
 * The receiving device may be in another namespace, so
 * we have to clear all information in the skb that could
 * impact namespace isolation.
 */
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
}
EXPORT_SYMBOL_GPL(dev_forward_skb);
1728
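/* A minimal usage sketch (hypothetical veth-like driver, not code from
 * this file): hand the outgoing skb to a peer device's receive path
 * straight from ndo_start_xmit.  The "peer" field and my_priv are
 * assumed driver-private names.
 *
 *	static netdev_tx_t my_start_xmit(struct sk_buff *skb,
 *					 struct net_device *dev)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		if (dev_forward_skb(priv->peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;	// skb already freed
 *		return NETDEV_TX_OK;
 *	}
 */
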
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by the sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not, NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case, TC0
 * is invalid and nothing can be done, so disable priority mappings.
 * It is expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

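/* A minimal sketch (hypothetical driver setup, not code from this file)
 * of the mappings netif_setup_tc() validates: two traffic classes over
 * four queues, with priority 5 steered to TC1.
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 2, 0);	// TC0: queues 0-1
 *	netdev_set_tc_queue(dev, 1, 2, 2);	// TC1: queues 2-3
 *	netdev_set_prio_tc_map(dev, 5, 1);
 *
 * If real_num_tx_queues later drops below 4, TC1's offset/count pair
 * becomes invalid and netif_setup_tc() resets priority 5 back to TC0.
 */
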
#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}

	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
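/* A minimal usage sketch (hypothetical driver code, not from this file):
 * pin each transmit queue to the CPU of the same index, a common XPS
 * setup for per-CPU queue pairs.
 *
 *	for (i = 0; i < dev->real_num_tx_queues; i++)
 *		netif_set_xps_queue(dev, cpumask_of(i), i);
 *
 * Queue i will then be preferred for traffic generated on CPU i.
 */
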
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);

#ifdef CONFIG_SYSFS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif
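/* A minimal sketch (hypothetical driver reconfiguration path, not code
 * from this file): resize the active queue counts under the RTNL lock,
 * as the comments above require for a registered device.
 *
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, new_tx_count);
 *	if (!err)
 *		err = netif_set_real_num_rx_queues(dev, new_rx_count);
 *	rtnl_unlock();
 */
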
/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);
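/* A minimal usage sketch (hypothetical probe code, not from this file):
 * cap the default queue count by both the hardware limit and the RSS
 * default.  On a 4-CPU host, an 8-queue NIC would get 4 queues.
 *
 *	num_queues = min_t(int, MY_HW_MAX_QUEUES,	// assumed HW limit
 *			   netif_get_num_default_rss_queues());
 */
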
static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void netif_schedule_queue(struct netdev_queue *txq)
{
	rcu_read_lock();
	if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
		struct Qdisc *q = rcu_dereference(txq->qdisc);

		__netif_schedule(q);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(netif_schedule_queue);

/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(txq->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_wake_subqueue);

void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
		struct Qdisc *q;

		rcu_read_lock();
		q = rcu_dereference(dev_queue->qdisc);
		__netif_schedule(q);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(netif_tx_wake_queue);

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);

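/* A minimal usage sketch (hypothetical TX-completion handler, not code
 * from this file): drivers whose cleanup path may run in hardirq context
 * should use dev_kfree_skb_any()/dev_consume_skb_any(), which resolve to
 * the _irq variant above when IRQs are off.
 *
 *	while ((skb = my_ring_next_completed(ring)) != NULL)	// assumed helper
 *		dev_consume_skb_any(skb);	// frame was sent, not dropped
 */
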
/**
 *	netif_device_detach - mark device as removed
 *	@dev: network device
 *
 *	Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 *	netif_device_attach - mark device as attached
 *	@dev: network device
 *
 *	Mark device as attached from system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

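/* A worked example of the fold step above (an arithmetic illustration,
 * not code from this file): csum_fold() turns the 32-bit accumulator
 * into the final 16-bit Internet checksum by adding the halves with
 * end-around carry and complementing:
 *
 *	csum		 = 0x24C31	(32-bit sum)
 *	0x4C31 + 0x0002	 = 0x4C33	(fold the high half into the low)
 *	~0x4C33		 = 0xB3CC	(final __sum16, stored at csum_offset)
 */
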
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	unsigned int vlan_depth = skb->mac_len;
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	/* if skb->protocol is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		if (vlan_depth) {
			if (WARN_ON(vlan_depth < VLAN_HLEN))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		do {
			struct vlan_hdr *vh;

			if (unlikely(!pskb_may_pull(skb,
						    vlan_depth + VLAN_HLEN)))
				return 0;

			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (type == htons(ETH_P_8021Q) ||
			 type == htons(ETH_P_8021AD));
	}

	*depth = vlan_depth;

	return type;
}

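/* An illustration of the walk above (not code from this file): for a
 * QinQ frame laid out as
 *
 *	[dst][src][0x88A8][TCI][0x8100][TCI][0x0800][IP ...]
 *
 * vlan_depth starts at ETH_HLEN (14), the loop steps it past the outer
 * tag (to 18) and the inner tag (to 22), and the function returns
 * htons(ETH_P_IP) with *depth = 22, the offset of the IP header from
 * the MAC header.
 */
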
/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);


/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}

/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		skb_warn_bad_offload(skb);

		err = skb_cow_head(skb, 0);
		if (err < 0)
			return ERR_PTR(err);
	}

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
EXPORT_SYMBOL(__skb_gso_segment);

f6a78bfc 2479
fb286bb2
HX
2480/* Take action when hardware reception checksum errors are detected. */
2481#ifdef CONFIG_BUG
2482void netdev_rx_csum_fault(struct net_device *dev)
2483{
2484 if (net_ratelimit()) {
7b6cd1ce 2485 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
fb286bb2
HX
2486 dump_stack();
2487 }
2488}
2489EXPORT_SYMBOL(netdev_rx_csum_fault);
2490#endif
2491
1da177e4
LT
2492/* Actually, we should eliminate this check as soon as we know, that:
2493 * 1. IOMMU is present and allows to map all the memory.
2494 * 2. No high memory really exists on this machine.
2495 */
2496
c1e756bf 2497static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1da177e4 2498{
3d3a8533 2499#ifdef CONFIG_HIGHMEM
1da177e4 2500 int i;
5acbbd42 2501 if (!(dev->features & NETIF_F_HIGHDMA)) {
ea2ab693
IC
2502 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2503 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2504 if (PageHighMem(skb_frag_page(frag)))
5acbbd42 2505 return 1;
ea2ab693 2506 }
5acbbd42 2507 }
1da177e4 2508
5acbbd42
FT
2509 if (PCI_DMA_BUS_IS_PHYS) {
2510 struct device *pdev = dev->dev.parent;
1da177e4 2511
9092c658
ED
2512 if (!pdev)
2513 return 0;
5acbbd42 2514 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
ea2ab693
IC
2515 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2516 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
5acbbd42
FT
2517 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2518 return 1;
2519 }
2520 }
3d3a8533 2521#endif
1da177e4
LT
2522 return 0;
2523}
1da177e4 2524
3b392ddb
SH
2525/* If MPLS offload request, verify we are testing hardware MPLS features
2526 * instead of standard features for the netdev.
2527 */
2528#ifdef CONFIG_NET_MPLS_GSO
2529static netdev_features_t net_mpls_features(struct sk_buff *skb,
2530 netdev_features_t features,
2531 __be16 type)
2532{
2533 if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
2534 features &= skb->dev->mpls_features;
2535
2536 return features;
2537}
2538#else
2539static netdev_features_t net_mpls_features(struct sk_buff *skb,
2540 netdev_features_t features,
2541 __be16 type)
2542{
2543 return features;
2544}
2545#endif
2546
static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~NETIF_F_ALL_CSUM;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	const struct net_device *dev = skb->dev;
	netdev_features_t features = dev->features;
	u16 gso_segs = skb_shinfo(skb)->gso_segs;
	__be16 protocol = skb->protocol;

	if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, features);
	}

	features = netdev_intersect_features(features,
					     dev->vlan_features |
					     NETIF_F_HW_VLAN_CTAG_TX |
					     NETIF_F_HW_VLAN_STAG_TX);

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
		features = netdev_intersect_features(features,
						     NETIF_F_SG |
						     NETIF_F_HIGHDMA |
						     NETIF_F_FRAGLIST |
						     NETIF_F_GEN_CSUM |
						     NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_STAG_TX);

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);

static int xmit_one(struct sk_buff *skb, struct net_device *dev,
		    struct netdev_queue *txq, bool more)
{
	unsigned int len;
	int rc;

	if (!list_empty(&ptype_all))
		dev_queue_xmit_nit(skb, dev);

	len = skb->len;
	trace_net_dev_start_xmit(skb, dev);
	rc = netdev_start_xmit(skb, dev, txq, more);
	trace_net_dev_xmit(skb, rc, dev, len);

	return rc;
}

struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
				    struct netdev_queue *txq, int *ret)
{
	struct sk_buff *skb = first;
	int rc = NETDEV_TX_OK;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		rc = xmit_one(skb, dev, txq, next != NULL);
		if (unlikely(!dev_xmit_complete(rc))) {
			skb->next = next;
			goto out;
		}

		skb = next;
		if (netif_xmit_stopped(txq) && skb) {
			rc = NETDEV_TX_BUSY;
			break;
		}
	}

out:
	*ret = rc;
	return skb;
}

static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (vlan_tx_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_put_tag(skb, skb->vlan_proto,
				     vlan_tx_tag_get(skb));
		if (skb)
			skb->vlan_tci = 0;
	}
	return skb;
}

static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
{
	netdev_features_t features;

	if (skb->next)
		return skb;

	features = netif_skb_features(skb);
	skb = validate_xmit_vlan(skb, features);
	if (unlikely(!skb))
		goto out_null;

	/* If encapsulation offload request, verify we are testing
	 * hardware encapsulation features instead of standard
	 * features for the netdev
	 */
	if (skb->encapsulation)
		features &= dev->hw_enc_features;

	if (netif_needs_gso(dev, skb, features)) {
		struct sk_buff *segs;

		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs)) {
			segs = NULL;
		} else if (segs) {
			consume_skb(skb);
			skb = segs;
		}
	} else {
		if (skb_needs_linearize(skb, features) &&
		    __skb_linearize(skb))
			goto out_kfree_skb;

		/* If packet is not checksummed and device does not
		 * support checksumming for this protocol, complete
		 * checksumming here.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb->encapsulation)
				skb_set_inner_transport_header(skb,
					skb_checksum_start_offset(skb));
			else
				skb_set_transport_header(skb,
					skb_checksum_start_offset(skb));
			if (!(features & NETIF_F_ALL_CSUM) &&
			    skb_checksum_help(skb))
				goto out_kfree_skb;
		}
	}

	return skb;

out_kfree_skb:
	kfree_skb(skb);
out_null:
	return NULL;
}

struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *next, *head = NULL, *tail;

	for (; skb != NULL; skb = next) {
		next = skb->next;
		skb->next = NULL;

		/* in case skb won't be segmented, point to itself */
		skb->prev = skb;

		skb = validate_xmit_skb(skb, dev);
		if (!skb)
			continue;

		if (!head)
			head = skb;
		else
			tail->next = skb;
		/* If skb was segmented, skb->prev points to
		 * the last segment.  If not, it still contains skb.
		 */
		tail = skb->prev;
	}
	return head;
}

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}

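/* A worked example of the estimate above (an illustration, not code from
 * this file): a TSO skb carrying 10 * 1448 bytes of TCP payload behind a
 * 14-byte MAC + 20-byte IP + 20-byte TCP header has skb->len = 14534,
 * hdr_len = 54 and gso_segs = 10, so
 *
 *	pkt_len = 14534 + 9 * 54 = 15020
 *
 * which matches the 10 * (1448 + 54) bytes that will actually hit the
 * wire once the device splits it into 10 frames.
 */
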
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC___STATE_RUNNING owner to get the lock more
	 * often and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 * dev_loopback_xmit - loop back @skb
 * @skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
		__skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	/* If device/qdisc don't need skb->dst, release it right now while
	 * it's hot in this cpu cache.
	 */
	if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
		skb_dst_drop(skb);
	else
		skb_dst_force(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	   loopback, all the sorts of tunnels...

	   Really, it is unlikely that netif_tx_lock protection is necessary
	   here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	   counters.)
	   However, it is possible, that they rely on protection
	   made by us here.

	   Check this and shot the lock. It is not prone from deadlocks.
	   Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			skb = validate_xmit_skb(skb, dev);
			if (!skb)
				goto drop;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				skb = dev_hard_start_xmit(skb, dev, txq, &rc);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
drop:
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb_list(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

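/* A minimal usage sketch (hypothetical tunnel-style sender, not code
 * from this file): the caller builds the frame, sets skb->dev, and
 * hands it off; the skb is consumed whatever the outcome, and positive
 * NET_XMIT_* codes may come back from the qdisc layer.
 *
 *	skb->dev = out_dev;			// assumed egress device
 *	skb->protocol = htons(ETH_P_IP);
 *	rc = dev_queue_xmit(skb);
 *	if (rc)
 *		out_dev->stats.tx_errors++;	// hypothetical accounting
 */
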
int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);


/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[reciprocal_scale(hash, map->len)];
		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
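/* A minimal sketch (hypothetical driver expiry scan, not code from this
 * file) of the periodic pattern the comment above asks for: walk the
 * installed aRFS filters and tear down the ones the stack has aged out.
 * The priv/filter fields and my_hw_remove_filter() are assumed names.
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct my_filter *f = &priv->filters[i];
 *
 *		if (f->installed &&
 *		    rps_may_expire_flow(dev, f->rxq_index, f->flow_id, i))
 *			my_hw_remove_filter(priv, f);
 *	}
 */
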
/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = this_cpu_ptr(&softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP     (packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

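/* A minimal usage sketch (hypothetical non-NAPI driver ISR, not code
 * from this file): pull the frame out of the hardware, attach metadata,
 * and hand it to the stack.
 *
 *	skb_put(skb, frame_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);		// skb is consumed whatever the verdict
 */
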
3392int netif_rx_ni(struct sk_buff *skb)
3393{
3394 int err;
3395
ae78dbfa
BH
3396 trace_netif_rx_ni_entry(skb);
3397
1da177e4 3398 preempt_disable();
ae78dbfa 3399 err = netif_rx_internal(skb);
1da177e4
LT
3400 if (local_softirq_pending())
3401 do_softirq();
3402 preempt_enable();
3403
3404 return err;
3405}
1da177e4
LT
3406EXPORT_SYMBOL(netif_rx_ni);
3407
1da177e4
LT
3408static void net_tx_action(struct softirq_action *h)
3409{
903ceff7 3410 struct softnet_data *sd = this_cpu_ptr(&softnet_data);
1da177e4
LT
3411
3412 if (sd->completion_queue) {
3413 struct sk_buff *clist;
3414
3415 local_irq_disable();
3416 clist = sd->completion_queue;
3417 sd->completion_queue = NULL;
3418 local_irq_enable();
3419
3420 while (clist) {
3421 struct sk_buff *skb = clist;
3422 clist = clist->next;
3423
547b792c 3424 WARN_ON(atomic_read(&skb->users));
e6247027
ED
3425 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3426 trace_consume_skb(skb);
3427 else
3428 trace_kfree_skb(skb, net_tx_action);
1da177e4
LT
3429 __kfree_skb(skb);
3430 }
3431 }
3432
3433 if (sd->output_queue) {
37437bb2 3434 struct Qdisc *head;
1da177e4
LT
3435
3436 local_irq_disable();
3437 head = sd->output_queue;
3438 sd->output_queue = NULL;
a9cbd588 3439 sd->output_queue_tailp = &sd->output_queue;
1da177e4
LT
3440 local_irq_enable();
3441
3442 while (head) {
37437bb2
DM
3443 struct Qdisc *q = head;
3444 spinlock_t *root_lock;
3445
1da177e4
LT
3446 head = head->next_sched;
3447
5fb66229 3448 root_lock = qdisc_lock(q);
37437bb2 3449 if (spin_trylock(root_lock)) {
4e857c58 3450 smp_mb__before_atomic();
def82a1d
JP
3451 clear_bit(__QDISC_STATE_SCHED,
3452 &q->state);
37437bb2
DM
3453 qdisc_run(q);
3454 spin_unlock(root_lock);
1da177e4 3455 } else {
195648bb 3456 if (!test_bit(__QDISC_STATE_DEACTIVATED,
e8a83e10 3457 &q->state)) {
195648bb 3458 __netif_reschedule(q);
e8a83e10 3459 } else {
4e857c58 3460 smp_mb__before_atomic();
e8a83e10
JP
3461 clear_bit(__QDISC_STATE_SCHED,
3462 &q->state);
3463 }
1da177e4
LT
3464 }
3465 }
3466 }
3467}
3468
ab95bfe0
JP
3469#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3470 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
da678292
MM
3471/* This hook is defined here for ATM LANE */
3472int (*br_fdb_test_addr_hook)(struct net_device *dev,
3473 unsigned char *addr) __read_mostly;
4fb019a0 3474EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
da678292 3475#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * whenever CONFIG_NET_CLS_ACT is?  Otherwise we currently pay for a few
 * useless instructions (a compare and two extra stores) when it is not
 * enabled but CONFIG_NET_CLS_ACT is.
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rcu_dereference(rxq->qdisc);
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 *	netdev_rx_handler_register - register receive handler
 *	@dev: device to register a handler for
 *	@rx_handler: receive handler to register
 *	@rx_handler_data: data pointer that is used by rx handler
 *
 *	Register a receive handler for a device. This handler will then be
 *	called from __netif_receive_skb. A negative errno code is returned
 *	on a failure.
 *
 *	The caller must hold the rtnl_mutex.
 *
 *	For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
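
/* Illustrative sketch (not part of this file): minimal use of the API
 * above, in the spirit of the bridge/bonding/team drivers.  The demo_*
 * names are assumptions; a real handler would inspect the frame and
 * usually return RX_HANDLER_PASS for traffic it does not own.
 */
static rx_handler_result_t demo_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!port)
		return RX_HANDLER_PASS;	/* let normal delivery continue */

	/* Claim the packet; the handler now owns the skb. */
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

static int demo_port_attach(struct net_device *dev, void *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, demo_handle_frame, port);
	rtnl_unlock();	/* -EBUSY if another handler is already attached */
	return err;
}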

/**
 *	netdev_rx_handler_unregister - unregister receive handler
 *	@dev: device to unregister a handler from
 *
 *	Unregister a receive handler from a device.
 *
 *	The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(vlan_tx_tag_present(skb))) {
		if (vlan_tx_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
	return ret;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the allocation
		 * context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

static int netif_receive_skb_internal(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}

/**
 *	netif_receive_skb - process receive buffer from network
 *	@skb: buffer to process
 *
 *	netif_receive_skb() is the main receive data processing function.
 *	It always succeeds. The buffer may be dropped during processing
 *	for congestion control or by the protocol layers.
 *
 *	This function may only be called from softirq context and interrupts
 *	should be enabled.
 *
 *	Return values (usually ignored):
 *	NET_RX_SUCCESS: no congestion
 *	NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
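
/* Illustrative sketch (not part of this file): netif_receive_skb() as a
 * non-GRO NAPI driver would call it from its ->poll() handler, i.e. in
 * softirq context with interrupts enabled.  demo_rx_one is an assumed
 * name; eth_type_trans() fills in skb->protocol first.
 */
static void demo_rx_one(struct net_device *dev, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);
}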

/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}
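
/* Context note (illustrative, not part of this file): flush_backlog() is
 * run on every CPU during device unregister, along the lines of
 *
 *	on_each_cpu(flush_backlog, dev, true);
 *
 * which is what provides the "irqs disabled" calling convention above.
 */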

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}

/* napi->gro_list contains packets ordered by age;
 * the youngest packets are at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
		goto normal;

	gro_list_prepare(napi, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->udp_mark = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
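
/* Illustrative sketch (not part of this file): the lookups above find
 * entries that protocols register on offload_base via dev_add_offload(),
 * roughly how IPv4 wires up its GRO callbacks.  The demo_* callback names
 * are assumptions.
 */
static struct packet_offload demo_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP),
	.callbacks = {
		.gro_receive	= demo_gro_receive,
		.gro_complete	= demo_gro_complete,
	},
};

static int __init demo_offload_init(void)
{
	dev_add_offload(&demo_packet_offload);
	return 0;
}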

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
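
/* Illustrative sketch (not part of this file): a NAPI driver hands each
 * completed receive to napi_gro_receive() instead of netif_receive_skb(),
 * letting the stack merge same-flow segments before they go up.
 * demo_rx_one_gro is an assumed name.
 */
static void demo_rx_one_gro(struct napi_struct *napi, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, skb->dev);
	napi_gro_receive(napi, skb);	/* may hold, merge or deliver */
}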

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;
	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);
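
/* Illustrative sketch (not part of this file): the napi_get_frags() /
 * napi_gro_frags() pattern used by page-based drivers.  demo_rx_page, its
 * arguments and the truesize accounting are assumptions.
 */
static void demo_rx_page(struct napi_struct *napi, struct page *page,
			 unsigned int len)
{
	struct sk_buff *skb = napi_get_frags(napi);

	if (unlikely(!skb)) {
		put_page(page);
		return;
	}

	/* Frame stays in the page; no copy into the linear area. */
	skb_fill_page_desc(skb, 0, page, 0, len);
	skb->len += len;
	skb->data_len += len;
	skb->truesize += PAGE_SIZE;

	napi_gro_frags(napi);	/* consumes or recycles napi->skb */
}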

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
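
/* Illustrative note (not part of this file): protocol gro_receive handlers
 * normally reach __skb_gro_checksum_complete() through the
 * skb_gro_checksum_validate() helpers, roughly as the TCP/IPv4 path does:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo)) {
 *		NAPI_GRO_CB(skb)->flush = 1;
 *		return NULL;
 *	}
 *
 * The helpers consume csum_cnt/csum_valid as set up in dev_gro_receive()
 * and only fall back to the full computation above when necessary.
 */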

/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				smp_call_function_single_async(remsd->cpu,
							       &remsd->csd);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending IPIs; it is better to send them now
	 * than to wait until net_rx_action() ends.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (1) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * Only the current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;
			rps_unlock(sd);

			break;
		}

		skb_queue_splice_tail_init(&sd->input_pkt_queue,
					   &sd->process_queue);
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run.
 * Consider using __napi_schedule_irqoff() if hard irqs are masked.
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);

/**
 * __napi_schedule_irqoff - schedule for receive
 * @n: entry to schedule
 *
 * Variant of __napi_schedule() assuming hard irqs are masked
 */
void __napi_schedule_irqoff(struct napi_struct *n)
{
	____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n, false);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

/* must be called under rcu_read_lock(), as we don't take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
EXPORT_SYMBOL_GPL(napi_by_id);

void napi_hash_add(struct napi_struct *napi)
{
	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {

		spin_lock(&napi_hash_lock);

		/* 0 is not a valid id; we also skip an id that is already
		 * taken. We expect both events to be extremely rare.
		 */
		napi->napi_id = 0;
		while (!napi->napi_id) {
			napi->napi_id = ++napi_gen_id;
			if (napi_by_id(napi->napi_id))
				napi->napi_id = 0;
		}

		hlist_add_head_rcu(&napi->napi_hash_node,
				   &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

		spin_unlock(&napi_hash_lock);
	}
}
EXPORT_SYMBOL_GPL(napi_hash_add);

/* Warning: the caller is responsible for making sure an RCU grace period
 * has elapsed before freeing the memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi)
{
	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
		hlist_del_rcu(&napi->napi_hash_node);

	spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_del);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);

void netif_napi_del(struct napi_struct *napi)
{
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);
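
/* Illustrative sketch (not part of this file): the canonical NAPI shape
 * built on netif_napi_add().  demo_priv, demo_irq, demo_clean_rx() and
 * DEMO_NAPI_WEIGHT are assumptions; a real driver masks/unmasks its own
 * interrupt sources where indicated.
 */
struct demo_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static irqreturn_t demo_irq(int irq, void *data)
{
	struct demo_priv *priv = data;

	/* Mask device RX interrupts here, then defer to softirq. */
	napi_schedule(&priv->napi);
	return IRQ_HANDLED;
}

static int demo_poll(struct napi_struct *napi, int budget)
{
	struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
	int work = demo_clean_rx(priv, budget);

	if (work < budget) {
		napi_complete(napi);
		/* Re-enable device RX interrupts here. */
	}
	return work;
}

/* At probe: netif_napi_add(netdev, &priv->napi, demo_poll, DEMO_NAPI_WEIGHT); */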

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows an average
		 * latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi(). Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call. Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight. In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else {
				if (n->gro_list) {
					/* flush too old packets
					 * If HZ < 1000, flush all packets.
					 */
					local_irq_enable();
					napi_gro_flush(n, HZ >= 1000);
					local_irq_disable();
				}
				list_move_tail(&n->poll_list, &sd->poll_list);
			}
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->all_adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
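
/* Illustrative sketch (not part of this file): consuming the iterator
 * above through the netdev_for_each_upper_dev_rcu() wrapper from
 * <linux/netdevice.h>.  demo_count_uppers is an assumed name.
 */
static int demo_count_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;
	int n = 0;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		n++;
	rcu_read_unlock();

	return n;
}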

/**
 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->all_adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
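
/* Illustrative sketch (not part of this file): an RCU-side master check,
 * as a receive path might do it.  demo_is_port_of is an assumed name.
 */
static bool demo_is_port_of(struct net_device *dev, struct net_device *master)
{
	bool ret;

	rcu_read_lock();
	ret = netdev_master_upper_dev_get_rcu(dev) == master;
	rcu_read_unlock();

	return ret;
}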

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *dev_list)
{
	return (dev_list == &dev->adj_list.upper ||
		dev_list == &dev->adj_list.lower) &&
	       net_eq(dev_net(dev), dev_net(adj_dev));
}

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(dev, adj_dev, dev_list);

	if (adj) {
		adj->ref_nr++;
		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(dev, adj_dev, dev_list);

	if (!adj) {
		pr_err("tried to remove device %s from %s\n",
		       dev->name, adj_dev->name);
		BUG();
	}

	if (adj->ref_nr > 1) {
		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
			 adj->ref_nr-1);
		adj->ref_nr--;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
					   master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
					   false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
		return ret;
	}

	return 0;
}

static int __netdev_adjacent_dev_link(struct net_device *dev,
				      struct net_device *upper_dev)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->all_adj_list.upper,
						&upper_dev->all_adj_list.lower,
						NULL, false);
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
}

static void __netdev_adjacent_dev_unlink(struct net_device *dev,
					 struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->all_adj_list.upper,
					   &upper_dev->all_adj_list.lower);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	int ret = __netdev_adjacent_dev_link(dev, upper_dev);

	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
					       &dev->adj_list.upper,
					       &upper_dev->adj_list.lower,
					       private, master);
	if (ret) {
		__netdev_adjacent_dev_unlink(dev, upper_dev);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink(dev, upper_dev);
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *private)
{
	struct netdev_adjacent *i, *j, *to_i, *to_j;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
		return -EBUSY;

	if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
						   master);
	if (ret)
		return ret;

	/* Now that we linked these devs, make all the upper_dev's
	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
	 * vice versa, and don't forget the devices themselves. All of these
	 * links are non-neighbours.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			pr_debug("Interlinking %s with %s, non-neighbour\n",
				 i->dev->name, j->dev->name);
			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
			if (ret)
				goto rollback_mesh;
		}
	}

	/* add dev to every upper_dev's upper device */
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		pr_debug("linking %s's upper device %s with %s\n",
			 upper_dev->name, i->dev->name, dev->name);
		ret = __netdev_adjacent_dev_link(dev, i->dev);
		if (ret)
			goto rollback_upper_mesh;
	}

	/* add upper_dev to every dev's lower device */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		pr_debug("linking %s's lower device %s with %s\n", dev->name,
			 i->dev->name, upper_dev->name);
		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
		if (ret)
			goto rollback_lower_mesh;
	}

	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
	return 0;

rollback_lower_mesh:
	to_i = i;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
	}

	i = NULL;

rollback_upper_mesh:
	to_i = i;
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(dev, i->dev);
	}

	i = j = NULL;

rollback_mesh:
	to_i = i;
	to_j = j;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			if (i == to_i && j == to_j)
				break;
			__netdev_adjacent_dev_unlink(i->dev, j->dev);
		}
		if (i == to_i)
			break;
	}

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, private);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
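
/* Illustrative sketch (not part of this file): a bonding-style master
 * wiring up a slave under RTNL.  demo_enslave is an assumed name and the
 * surrounding enslavement work is elided.
 */
static int demo_enslave(struct net_device *master_dev,
			struct net_device *slave_dev)
{
	int err;

	ASSERT_RTNL();

	err = netdev_master_upper_dev_link(slave_dev, master_dev);
	if (err)
		return err;	/* e.g. -EBUSY if slave already has a master */

	/* ... dev_open(), address/MTU sync, etc. ... */
	return 0;
}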

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_adjacent *i, *j;

	ASSERT_RTNL();

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	/* Here is the tricky part. We must remove all dev's lower
	 * devices from all upper_dev's upper devices and vice
	 * versa, to maintain the graph relationship.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
			__netdev_adjacent_dev_unlink(i->dev, j->dev);

	/* also remove the devices themselves from the lower/upper device
	 * lists
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);

	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
		__netdev_adjacent_dev_unlink(dev, i->dev);

	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

void netdev_adjacent_add_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(dev, iter->dev,
					  &dev->adj_list.lower);
	}
}

void netdev_adjacent_del_links(struct net_device *dev)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.upper);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, dev->name,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_del(dev, iter->dev->name,
					  &dev->adj_list.lower);
	}
}
5303
void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	struct net *net = dev_net(dev);

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		if (!net_eq(net, dev_net(iter->dev)))
			continue;
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev,
		       bool (*type_check)(struct net_device *dev))
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = -1;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower, type_check);
		if (max_nest < nest)
			max_nest = nest;
	}

	if (type_check(dev))
		max_nest++;

	return max_nest;
}
EXPORT_SYMBOL(dev_get_nest_level);
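
/* Illustrative sketch (an assumption, not taken from this file): the kind
 * of type_check predicate dev_get_nest_level() expects. Here it matches
 * VLAN devices via IFF_802_1Q_VLAN; real callers pass their own check.
 */
static bool example_is_vlan(struct net_device *dev)
{
	return dev->priv_flags & IFF_802_1Q_VLAN;
}

/* Under RTNL: nest = dev_get_nest_level(dev, example_is_vlan); */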

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

/**
 * dev_set_promiscuity - update promiscuity count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove promiscuity from a device. While the count in the device
 * remains above zero the interface remains promiscuous. Once it hits zero
 * the device reverts back to normal filtering operation. A negative inc
 * value is used to drop promiscuity on the device.
 * Return 0 if successful or a negative errno code on error.
 */
int dev_set_promiscuity(struct net_device *dev, int inc)
{
	unsigned int old_flags = dev->flags;
	int err;

	err = __dev_set_promiscuity(dev, inc, true);
	if (err < 0)
		return err;
	if (dev->flags != old_flags)
		dev_set_rx_mode(dev);
	return err;
}
EXPORT_SYMBOL(dev_set_promiscuity);
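
/* Usage sketch (illustrative only): a hypothetical capture driver taking
 * one promiscuity reference while sniffing and dropping it afterwards.
 * The counter semantics above make nested users compose safely.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take a reference */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}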

static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags, old_gflags = dev->gflags;

	ASSERT_RTNL();

	dev->flags |= IFF_ALLMULTI;
	dev->allmulti += inc;
	if (dev->allmulti == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch allmulti and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_ALLMULTI;
		else {
			dev->allmulti -= inc;
			pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags ^ old_flags) {
		dev_change_rx_flags(dev, IFF_ALLMULTI);
		dev_set_rx_mode(dev);
		if (notify)
			__dev_notify_flags(dev, old_flags,
					   dev->gflags ^ old_gflags);
	}
	return 0;
}

/**
 * dev_set_allmulti - update allmulti count on a device
 * @dev: device
 * @inc: modifier
 *
 * Add or remove reception of all multicast frames to a device. While the
 * count in the device remains above zero the interface remains listening
 * to all multicast frames. Once it hits zero the device reverts back to
 * normal filtering operation. A negative @inc value is used to drop the
 * counter when releasing a resource needing all multicasts.
 * Return 0 if successful or a negative errno code on error.
 */

int dev_set_allmulti(struct net_device *dev, int inc)
{
	return __dev_set_allmulti(dev, inc, true);
}
EXPORT_SYMBOL(dev_set_allmulti);

/*
 * Upload unicast and multicast address lists to device and
 * configure RX filtering. When the device doesn't support unicast
 * filtering it is put in promiscuous mode while unicast addresses
 * are present.
 */
void __dev_set_rx_mode(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	/* dev_open will call this function so the list will stay sane. */
	if (!(dev->flags & IFF_UP))
		return;

	if (!netif_device_present(dev))
		return;

	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
		/* Unicast addresses changes may only happen under the rtnl,
		 * therefore calling __dev_set_promiscuity here is safe.
		 */
		if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
			__dev_set_promiscuity(dev, 1, false);
			dev->uc_promisc = true;
		} else if (netdev_uc_empty(dev) && dev->uc_promisc) {
			__dev_set_promiscuity(dev, -1, false);
			dev->uc_promisc = false;
		}
	}

	if (ops->ndo_set_rx_mode)
		ops->ndo_set_rx_mode(dev);
}

void dev_set_rx_mode(struct net_device *dev)
{
	netif_addr_lock_bh(dev);
	__dev_set_rx_mode(dev);
	netif_addr_unlock_bh(dev);
}

/**
 * dev_get_flags - get flags reported to userspace
 * @dev: device
 *
 * Get the combination of flag bits exported through APIs to userspace.
 */
unsigned int dev_get_flags(const struct net_device *dev)
{
	unsigned int flags;

	flags = (dev->flags & ~(IFF_PROMISC |
				IFF_ALLMULTI |
				IFF_RUNNING |
				IFF_LOWER_UP |
				IFF_DORMANT)) |
		(dev->gflags & (IFF_PROMISC |
				IFF_ALLMULTI));

	if (netif_running(dev)) {
		if (netif_oper_up(dev))
			flags |= IFF_RUNNING;
		if (netif_carrier_ok(dev))
			flags |= IFF_LOWER_UP;
		if (netif_dormant(dev))
			flags |= IFF_DORMANT;
	}

	return flags;
}
EXPORT_SYMBOL(dev_get_flags);

int __dev_change_flags(struct net_device *dev, unsigned int flags)
{
	unsigned int old_flags = dev->flags;
	int ret;

	ASSERT_RTNL();

	/*
	 * Set the flags on our device.
	 */

	dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
			       IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
			       IFF_AUTOMEDIA)) |
		     (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
				    IFF_ALLMULTI));

	/*
	 * Load in the correct multicast list now the flags have changed.
	 */

	if ((old_flags ^ flags) & IFF_MULTICAST)
		dev_change_rx_flags(dev, IFF_MULTICAST);

	dev_set_rx_mode(dev);

	/*
	 * Have we downed the interface? We handle IFF_UP ourselves
	 * according to user attempts to set it, rather than blindly
	 * setting it.
	 */

	ret = 0;
	if ((old_flags ^ flags) & IFF_UP)
		ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);

	if ((flags ^ dev->gflags) & IFF_PROMISC) {
		int inc = (flags & IFF_PROMISC) ? 1 : -1;
		unsigned int old_flags = dev->flags;

		dev->gflags ^= IFF_PROMISC;

		if (__dev_set_promiscuity(dev, inc, false) >= 0)
			if (dev->flags != old_flags)
				dev_set_rx_mode(dev);
	}

	/* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
	 * is important. Some (broken) drivers set IFF_PROMISC when
	 * IFF_ALLMULTI is requested, not asking us and not reporting.
	 */
	if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
		int inc = (flags & IFF_ALLMULTI) ? 1 : -1;

		dev->gflags ^= IFF_ALLMULTI;
		__dev_set_allmulti(dev, inc, false);
	}

	return ret;
}

void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
			unsigned int gchanges)
{
	unsigned int changes = dev->flags ^ old_flags;

	if (gchanges)
		rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);

	if (changes & IFF_UP) {
		if (dev->flags & IFF_UP)
			call_netdevice_notifiers(NETDEV_UP, dev);
		else
			call_netdevice_notifiers(NETDEV_DOWN, dev);
	}

	if (dev->flags & IFF_UP &&
	    (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
		struct netdev_notifier_change_info change_info;

		change_info.flags_changed = changes;
		call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
					      &change_info.info);
	}
}

/**
 * dev_change_flags - change device settings
 * @dev: device
 * @flags: device state flags
 *
 * Change settings on a device based on state flags. The flags are
 * in the userspace exported format.
 */
int dev_change_flags(struct net_device *dev, unsigned int flags)
{
	int ret;
	unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;

	ret = __dev_change_flags(dev, flags);
	if (ret < 0)
		return ret;

	changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
	__dev_notify_flags(dev, old_flags, changes);
	return ret;
}
EXPORT_SYMBOL(dev_change_flags);
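
/* Usage sketch (illustrative only): bringing an interface up from inside
 * the kernel with the userspace-format flags, the same path the ioctl and
 * netlink handlers use. The caller is assumed to hold RTNL.
 */
static int example_bring_up(struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return dev_change_flags(dev, flags | IFF_UP);
}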

static int __dev_set_mtu(struct net_device *dev, int new_mtu)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_mtu)
		return ops->ndo_change_mtu(dev, new_mtu);

	dev->mtu = new_mtu;
	return 0;
}

/**
 * dev_set_mtu - Change maximum transfer unit
 * @dev: device
 * @new_mtu: new transfer unit
 *
 * Change the maximum transfer size of the network device.
 */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
	int err, orig_mtu;

	if (new_mtu == dev->mtu)
		return 0;

	/* MTU must be positive. */
	if (new_mtu < 0)
		return -EINVAL;

	if (!netif_device_present(dev))
		return -ENODEV;

	err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
	err = notifier_to_errno(err);
	if (err)
		return err;

	orig_mtu = dev->mtu;
	err = __dev_set_mtu(dev, new_mtu);

	if (!err) {
		err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		err = notifier_to_errno(err);
		if (err) {
			/* setting mtu back and notifying everyone again,
			 * so that they have a chance to revert changes.
			 */
			__dev_set_mtu(dev, orig_mtu);
			call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
		}
	}
	return err;
}
EXPORT_SYMBOL(dev_set_mtu);
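
/* Usage sketch (illustrative only): a hypothetical encapsulating driver
 * shrinking a lower device's MTU by its header overhead. A veto from any
 * NETDEV_PRECHANGEMTU/NETDEV_CHANGEMTU listener surfaces here as an errno.
 */
static int example_reserve_headroom(struct net_device *lower, int overhead)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(lower, lower->mtu - overhead);
	rtnl_unlock();
	return err;
}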

/**
 * dev_set_group - Change group this device belongs to
 * @dev: device
 * @new_group: group this device should belong to
 */
void dev_set_group(struct net_device *dev, int new_group)
{
	dev->group = new_group;
}
EXPORT_SYMBOL(dev_set_group);

/**
 * dev_set_mac_address - Change Media Access Control Address
 * @dev: device
 * @sa: new address
 *
 * Change the hardware (MAC) address of the device
 */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int err;

	if (!ops->ndo_set_mac_address)
		return -EOPNOTSUPP;
	if (sa->sa_family != dev->type)
		return -EINVAL;
	if (!netif_device_present(dev))
		return -ENODEV;
	err = ops->ndo_set_mac_address(dev, sa);
	if (err)
		return err;
	dev->addr_assign_type = NET_ADDR_SET;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);
	return 0;
}
EXPORT_SYMBOL(dev_set_mac_address);
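
/* Usage sketch (illustrative only): building the sockaddr that
 * dev_set_mac_address() expects from a raw byte array. sa_family must
 * match dev->type, mirroring the -EINVAL check above; call under RTNL.
 */
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;

	sa.sa_family = dev->type;	/* e.g. ARPHRD_ETHER */
	memcpy(sa.sa_data, mac, dev->addr_len);
	return dev_set_mac_address(dev, &sa);
}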

/**
 * dev_change_carrier - Change device carrier
 * @dev: device
 * @new_carrier: new value
 *
 * Change device carrier
 */
int dev_change_carrier(struct net_device *dev, bool new_carrier)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_change_carrier)
		return -EOPNOTSUPP;
	if (!netif_device_present(dev))
		return -ENODEV;
	return ops->ndo_change_carrier(dev, new_carrier);
}
EXPORT_SYMBOL(dev_change_carrier);

/**
 * dev_get_phys_port_id - Get device physical port ID
 * @dev: device
 * @ppid: port ID
 *
 * Get device physical port ID
 */
int dev_get_phys_port_id(struct net_device *dev,
			 struct netdev_phys_port_id *ppid)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (!ops->ndo_get_phys_port_id)
		return -EOPNOTSUPP;
	return ops->ndo_get_phys_port_id(dev, ppid);
}
EXPORT_SYMBOL(dev_get_phys_port_id);

/**
 * dev_new_index - allocate an ifindex
 * @net: the applicable net namespace
 *
 * Returns a suitable unique value for a new device interface
 * number. The caller must hold the rtnl semaphore or the
 * dev_base_lock to be sure it remains unique.
 */
static int dev_new_index(struct net *net)
{
	int ifindex = net->ifindex;
	for (;;) {
		if (++ifindex <= 0)
			ifindex = 1;
		if (!__dev_get_by_index(net, ifindex))
			return net->ifindex = ifindex;
	}
}

/* Delayed registration/unregistration */
static LIST_HEAD(net_todo_list);
DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);

static void net_set_todo(struct net_device *dev)
{
	list_add_tail(&dev->todo_list, &net_todo_list);
	dev_net(dev)->dev_unreg_count++;
}

static void rollback_registered_many(struct list_head *head)
{
	struct net_device *dev, *tmp;
	LIST_HEAD(close_head);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	list_for_each_entry_safe(dev, tmp, head, unreg_list) {
		/* Some devices call without registering
		 * for initialization unwind. Remove those
		 * devices and proceed with the remaining.
		 */
		if (dev->reg_state == NETREG_UNINITIALIZED) {
			pr_debug("unregister_netdevice: device %s/%p never was registered\n",
				 dev->name, dev);

			WARN_ON(1);
			list_del(&dev->unreg_list);
			continue;
		}
		dev->dismantle = true;
		BUG_ON(dev->reg_state != NETREG_REGISTERED);
	}

	/* If device is running, close it first. */
	list_for_each_entry(dev, head, unreg_list)
		list_add_tail(&dev->close_list, &close_head);
	dev_close_many(&close_head);

	list_for_each_entry(dev, head, unreg_list) {
		/* And unlink it from device chain. */
		unlist_netdevice(dev);

		dev->reg_state = NETREG_UNREGISTERING;
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list) {
		/* Shutdown queueing discipline. */
		dev_shutdown(dev);

		/* Notify protocols that we are about to destroy
		 * this device. They should clean all the things.
		 */
		call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

		/*
		 * Flush the unicast and multicast chains
		 */
		dev_uc_flush(dev);
		dev_mc_flush(dev);

		if (dev->netdev_ops->ndo_uninit)
			dev->netdev_ops->ndo_uninit(dev);

		if (!dev->rtnl_link_ops ||
		    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
			rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

		/* Notifier chain MUST detach us from all upper devices. */
		WARN_ON(netdev_has_any_upper_dev(dev));

		/* Remove entries from kobject tree */
		netdev_unregister_kobject(dev);
#ifdef CONFIG_XPS
		/* Remove XPS queueing entries */
		netif_reset_xps_queues_gt(dev, 0);
#endif
	}

	synchronize_net();

	list_for_each_entry(dev, head, unreg_list)
		dev_put(dev);
}

static void rollback_registered(struct net_device *dev)
{
	LIST_HEAD(single);

	list_add(&dev->unreg_list, &single);
	rollback_registered_many(&single);
	list_del(&single);
}

static netdev_features_t netdev_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/* Fix illegal checksum combinations */
	if ((features & NETIF_F_HW_CSUM) &&
	    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
		netdev_warn(dev, "mixed HW and IP checksum settings.\n");
		features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
	}

	/* TSO requires that SG is present as well. */
	if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
		features &= ~NETIF_F_ALL_TSO;
	}

	if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO;
		features &= ~NETIF_F_TSO_ECN;
	}

	if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
	    !(features & NETIF_F_IPV6_CSUM)) {
		netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
		features &= ~NETIF_F_TSO6;
	}

	/* TSO ECN requires that TSO is present as well. */
	if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
		features &= ~NETIF_F_TSO_ECN;

	/* Software GSO depends on SG. */
	if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
		netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
		features &= ~NETIF_F_GSO;
	}

	/* UFO needs SG and checksumming */
	if (features & NETIF_F_UFO) {
		/* maybe split UFO into V4 and V6? */
		if (!((features & NETIF_F_GEN_CSUM) ||
		      (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
			netdev_dbg(dev,
				   "Dropping NETIF_F_UFO since no checksum offload features.\n");
			features &= ~NETIF_F_UFO;
		}

		if (!(features & NETIF_F_SG)) {
			netdev_dbg(dev,
				   "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
			features &= ~NETIF_F_UFO;
		}
	}

#ifdef CONFIG_NET_RX_BUSY_POLL
	if (dev->netdev_ops->ndo_busy_poll)
		features |= NETIF_F_BUSY_POLL;
	else
#endif
		features &= ~NETIF_F_BUSY_POLL;

	return features;
}

int __netdev_update_features(struct net_device *dev)
{
	netdev_features_t features;
	int err = 0;

	ASSERT_RTNL();

	features = netdev_get_wanted_features(dev);

	if (dev->netdev_ops->ndo_fix_features)
		features = dev->netdev_ops->ndo_fix_features(dev, features);

	/* driver might be less strict about feature dependencies */
	features = netdev_fix_features(dev, features);

	if (dev->features == features)
		return 0;

	netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
		   &dev->features, &features);

	if (dev->netdev_ops->ndo_set_features)
		err = dev->netdev_ops->ndo_set_features(dev, features);

	if (unlikely(err < 0)) {
		netdev_err(dev,
			   "set_features() failed (%d); wanted %pNF, left %pNF\n",
			   err, &features, &dev->features);
		return -1;
	}

	if (!err)
		dev->features = features;

	return 1;
}

/**
 * netdev_update_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications if it
 * has changed. Should be called after driver or hardware dependent
 * conditions might have changed that influence the features.
 */
void netdev_update_features(struct net_device *dev)
{
	if (__netdev_update_features(dev))
		netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_update_features);

/**
 * netdev_change_features - recalculate device features
 * @dev: the device to check
 *
 * Recalculate dev->features set and send notifications even
 * if they have not changed. Should be called instead of
 * netdev_update_features() if also dev->vlan_features might
 * have changed to allow the changes to be propagated to stacked
 * VLAN devices.
 */
void netdev_change_features(struct net_device *dev)
{
	__netdev_update_features(dev);
	netdev_features_change(dev);
}
EXPORT_SYMBOL(netdev_change_features);
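
/* Usage sketch (illustrative only): a driver re-evaluating its features
 * after a configuration change that rules out TSO. Clearing the bits and
 * calling netdev_update_features() (under RTNL) lets ndo_fix_features and
 * netdev_fix_features() settle and advertise the final set.
 */
static void example_disable_tso(struct net_device *dev)
{
	dev->hw_features &= ~NETIF_F_ALL_TSO;
	dev->wanted_features &= ~NETIF_F_ALL_TSO;
	netdev_update_features(dev);
}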

/**
 * netif_stacked_transfer_operstate - transfer operstate
 * @rootdev: the root or lower level device to transfer state from
 * @dev: the device to transfer operstate to
 *
 * Transfer operational state from root to device. This is normally
 * called when a stacking relationship exists between the root
 * device and the device (a leaf device).
 */
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
				      struct net_device *dev)
{
	if (rootdev->operstate == IF_OPER_DORMANT)
		netif_dormant_on(dev);
	else
		netif_dormant_off(dev);

	if (netif_carrier_ok(rootdev)) {
		if (!netif_carrier_ok(dev))
			netif_carrier_on(dev);
	} else {
		if (netif_carrier_ok(dev))
			netif_carrier_off(dev);
	}
}
EXPORT_SYMBOL(netif_stacked_transfer_operstate);

#ifdef CONFIG_SYSFS
static int netif_alloc_rx_queues(struct net_device *dev)
{
	unsigned int i, count = dev->num_rx_queues;
	struct netdev_rx_queue *rx;

	BUG_ON(count < 1);

	rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	dev->_rx = rx;

	for (i = 0; i < count; i++)
		rx[i].dev = dev;
	return 0;
}
#endif

static void netdev_init_one_queue(struct net_device *dev,
				  struct netdev_queue *queue, void *_unused)
{
	/* Initialize queue lock */
	spin_lock_init(&queue->_xmit_lock);
	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
	queue->xmit_lock_owner = -1;
	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
	queue->dev = dev;
#ifdef CONFIG_BQL
	dql_init(&queue->dql, HZ);
#endif
}

static void netif_free_tx_queues(struct net_device *dev)
{
	kvfree(dev->_tx);
}

static int netif_alloc_netdev_queues(struct net_device *dev)
{
	unsigned int count = dev->num_tx_queues;
	struct netdev_queue *tx;
	size_t sz = count * sizeof(*tx);

	BUG_ON(count < 1 || count > 0xffff);

	tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!tx) {
		tx = vzalloc(sz);
		if (!tx)
			return -ENOMEM;
	}
	dev->_tx = tx;

	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
	spin_lock_init(&dev->tx_global_lock);

	return 0;
}

/**
 * register_netdevice - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * Callers must hold the rtnl semaphore. You may want
 * register_netdev() instead of this.
 *
 * BUGS:
 * The locking appears insufficient to guarantee two parallel registers
 * will not get the same name.
 */

int register_netdevice(struct net_device *dev)
{
	int ret;
	struct net *net = dev_net(dev);

	BUG_ON(dev_boot_phase);
	ASSERT_RTNL();

	might_sleep();

	/* When net_device's are persistent, this will be fatal. */
	BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
	BUG_ON(!net);

	spin_lock_init(&dev->addr_list_lock);
	netdev_set_addr_lockdep_class(dev);

	dev->iflink = -1;

	ret = dev_get_valid_name(net, dev, dev->name);
	if (ret < 0)
		goto out;

	/* Init, if this function is available */
	if (dev->netdev_ops->ndo_init) {
		ret = dev->netdev_ops->ndo_init(dev);
		if (ret) {
			if (ret > 0)
				ret = -EIO;
			goto out;
		}
	}

	if (((dev->hw_features | dev->features) &
	     NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
	     !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
		netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
		ret = -EINVAL;
		goto err_uninit;
	}

	ret = -EBUSY;
	if (!dev->ifindex)
		dev->ifindex = dev_new_index(net);
	else if (__dev_get_by_index(net, dev->ifindex))
		goto err_uninit;

	if (dev->iflink == -1)
		dev->iflink = dev->ifindex;

	/* Transfer changeable features to wanted_features and enable
	 * software offloads (GSO and GRO).
	 */
	dev->hw_features |= NETIF_F_SOFT_FEATURES;
	dev->features |= NETIF_F_SOFT_FEATURES;
	dev->wanted_features = dev->features & dev->hw_features;

	if (!(dev->flags & IFF_LOOPBACK)) {
		dev->hw_features |= NETIF_F_NOCACHE_COPY;
	}

	/* Make NETIF_F_HIGHDMA inheritable to VLAN devices. */
	dev->vlan_features |= NETIF_F_HIGHDMA;

	/* Make NETIF_F_SG inheritable to tunnel devices. */
	dev->hw_enc_features |= NETIF_F_SG;

	/* Make NETIF_F_SG inheritable to MPLS. */
	dev->mpls_features |= NETIF_F_SG;

	ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		goto err_uninit;

	ret = netdev_register_kobject(dev);
	if (ret)
		goto err_uninit;
	dev->reg_state = NETREG_REGISTERED;

	__netdev_update_features(dev);

	/*
	 * Default initial state at registry is that the
	 * device is present.
	 */

	set_bit(__LINK_STATE_PRESENT, &dev->state);

	linkwatch_init_dev(dev);

	dev_init_scheduler(dev);
	dev_hold(dev);
	list_netdevice(dev);
	add_device_randomness(dev->dev_addr, dev->addr_len);

	/* If the device has permanent device address, driver should
	 * set dev_addr and also addr_assign_type should be set to
	 * NET_ADDR_PERM (default value).
	 */
	if (dev->addr_assign_type == NET_ADDR_PERM)
		memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);

	/* Notify protocols, that a new device appeared. */
	ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
	ret = notifier_to_errno(ret);
	if (ret) {
		rollback_registered(dev);
		dev->reg_state = NETREG_UNREGISTERED;
	}
	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	if (!dev->rtnl_link_ops ||
	    dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
		rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

out:
	return ret;

err_uninit:
	if (dev->netdev_ops->ndo_uninit)
		dev->netdev_ops->ndo_uninit(dev);
	goto out;
}
EXPORT_SYMBOL(register_netdevice);

/**
 * init_dummy_netdev - init a dummy network device for NAPI
 * @dev: device to init
 *
 * This takes a network device structure and initializes the minimum
 * amount of fields so it can be used to schedule NAPI polls without
 * registering a full blown interface. This is to be used by drivers
 * that need to tie several hardware interfaces to a single NAPI
 * poll scheduler due to HW limitations.
 */
int init_dummy_netdev(struct net_device *dev)
{
	/* Clear everything. Note we don't initialize spinlocks
	 * as they aren't supposed to be taken by any of the
	 * NAPI code and this dummy netdev is supposed to be
	 * only ever used for NAPI polls
	 */
	memset(dev, 0, sizeof(struct net_device));

	/* make sure we BUG if trying to hit standard
	 * register/unregister code path
	 */
	dev->reg_state = NETREG_DUMMY;

	/* NAPI wants this */
	INIT_LIST_HEAD(&dev->napi_list);

	/* a dummy interface is started by default */
	set_bit(__LINK_STATE_PRESENT, &dev->state);
	set_bit(__LINK_STATE_START, &dev->state);

	/* Note : We don't allocate pcpu_refcnt for dummy devices,
	 * because users of this 'device' don't need to change
	 * its refcount.
	 */

	return 0;
}
EXPORT_SYMBOL_GPL(init_dummy_netdev);

/**
 * register_netdev - register a network device
 * @dev: device to register
 *
 * Take a completed network device structure and add it to the kernel
 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
 * chain. 0 is returned on success. A negative errno code is returned
 * on a failure to set up the device, or if the name is a duplicate.
 *
 * This is a wrapper around register_netdevice that takes the rtnl semaphore
 * and expands the device name if you passed a format string to
 * alloc_netdev.
 */
int register_netdev(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = register_netdevice(dev);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_netdev);
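
/* Usage sketch (illustrative only): the conventional probe flow ending in
 * register_netdev(). struct example_priv and example_netdev_ops are
 * stand-ins for a real driver's private state and ops table; ether_setup
 * and the "eth%d" template are the usual choices for Ethernet devices.
 */
struct example_priv {
	int id;				/* hypothetical driver state */
};

static const struct net_device_ops example_netdev_ops = {
	/* a real driver fills in ndo_open, ndo_start_xmit, ... */
};

static struct net_device *example_probe(void)
{
	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct example_priv), "eth%d",
			   NET_NAME_UNKNOWN, ether_setup);
	if (!dev)
		return NULL;

	dev->netdev_ops = &example_netdev_ops;
	if (register_netdev(dev)) {	/* takes and releases RTNL itself */
		free_netdev(dev);
		return NULL;
	}
	return dev;
}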

int netdev_refcnt_read(const struct net_device *dev)
{
	int i, refcnt = 0;

	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
	return refcnt;
}
EXPORT_SYMBOL(netdev_refcnt_read);

/**
 * netdev_wait_allrefs - wait until all references are gone.
 * @dev: target net_device
 *
 * This is called when unregistering network devices.
 *
 * Any protocol or device that holds a reference should register
 * for netdevice notification, and cleanup and put back the
 * reference if they receive an UNREGISTER event.
 * We can get stuck here if buggy protocols don't correctly
 * call dev_put.
 */
static void netdev_wait_allrefs(struct net_device *dev)
{
	unsigned long rebroadcast_time, warning_time;
	int refcnt;

	linkwatch_forget_dev(dev);

	rebroadcast_time = warning_time = jiffies;
	refcnt = netdev_refcnt_read(dev);

	while (refcnt != 0) {
		if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
			rtnl_lock();

			/* Rebroadcast unregister notification */
			call_netdevice_notifiers(NETDEV_UNREGISTER, dev);

			__rtnl_unlock();
			rcu_barrier();
			rtnl_lock();

			call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
			if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
				     &dev->state)) {
				/* We must not have linkwatch events
				 * pending on unregister. If this
				 * happens, we simply run the queue
				 * unscheduled, resulting in a noop
				 * for this device.
				 */
				linkwatch_run_queue();
			}

			__rtnl_unlock();

			rebroadcast_time = jiffies;
		}

		msleep(250);

		refcnt = netdev_refcnt_read(dev);

		if (time_after(jiffies, warning_time + 10 * HZ)) {
			pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
				 dev->name, refcnt);
			warning_time = jiffies;
		}
	}
}

/* The sequence is:
 *
 *	rtnl_lock();
 *	...
 *	register_netdevice(x1);
 *	register_netdevice(x2);
 *	...
 *	unregister_netdevice(y1);
 *	unregister_netdevice(y2);
 *	...
 *	rtnl_unlock();
 *	free_netdev(y1);
 *	free_netdev(y2);
 *
 * We are invoked by rtnl_unlock().
 * This allows us to deal with problems:
 * 1) We can delete sysfs objects which invoke hotplug
 *    without deadlocking with linkwatch via keventd.
 * 2) Since we run with the RTNL semaphore not held, we can sleep
 *    safely in order to wait for the netdev refcnt to drop to zero.
 *
 * We must not return until all unregister events added during
 * the interval the lock was held have been completed.
 */
void netdev_run_todo(void)
{
	struct list_head list;

	/* Snapshot list, allow later requests */
	list_replace_init(&net_todo_list, &list);

	__rtnl_unlock();

	/* Wait for rcu callbacks to finish before next phase */
	if (!list_empty(&list))
		rcu_barrier();

	while (!list_empty(&list)) {
		struct net_device *dev
			= list_first_entry(&list, struct net_device, todo_list);
		list_del(&dev->todo_list);

		rtnl_lock();
		call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
		__rtnl_unlock();

		if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
			pr_err("network todo '%s' but state %d\n",
			       dev->name, dev->reg_state);
			dump_stack();
			continue;
		}

		dev->reg_state = NETREG_UNREGISTERED;

		on_each_cpu(flush_backlog, dev, 1);

		netdev_wait_allrefs(dev);

		/* paranoia */
		BUG_ON(netdev_refcnt_read(dev));
		WARN_ON(rcu_access_pointer(dev->ip_ptr));
		WARN_ON(rcu_access_pointer(dev->ip6_ptr));
		WARN_ON(dev->dn_ptr);

		if (dev->destructor)
			dev->destructor(dev);

		/* Report a network device has been unregistered */
		rtnl_lock();
		dev_net(dev)->dev_unreg_count--;
		__rtnl_unlock();
		wake_up(&netdev_unregistering_wq);

		/* Free network device */
		kobject_put(&dev->dev.kobj);
	}
}

/* Convert net_device_stats to rtnl_link_stats64. They have the same
 * fields in the same order, with only the type differing.
 */
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
			     const struct net_device_stats *netdev_stats)
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
	memcpy(stats64, netdev_stats, sizeof(*stats64));
#else
	size_t i, n = sizeof(*stats64) / sizeof(u64);
	const unsigned long *src = (const unsigned long *)netdev_stats;
	u64 *dst = (u64 *)stats64;

	BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
		     sizeof(*stats64) / sizeof(u64));
	for (i = 0; i < n; i++)
		dst[i] = src[i];
#endif
}
EXPORT_SYMBOL(netdev_stats_to_stats64);

/**
 * dev_get_stats - get network device statistics
 * @dev: device to get statistics from
 * @storage: place to store stats
 *
 * Get network statistics from device. Return @storage.
 * The device driver may provide its own method by setting
 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
 * otherwise the internal statistics structure is used.
 */
struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
					struct rtnl_link_stats64 *storage)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_get_stats64) {
		memset(storage, 0, sizeof(*storage));
		ops->ndo_get_stats64(dev, storage);
	} else if (ops->ndo_get_stats) {
		netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
	} else {
		netdev_stats_to_stats64(storage, &dev->stats);
	}
	storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
	storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
	return storage;
}
EXPORT_SYMBOL(dev_get_stats);
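
/* Usage sketch (illustrative only): totalling traffic the way stats
 * consumers do, with a caller-provided rtnl_link_stats64 that
 * dev_get_stats() fills regardless of which driver method exists.
 */
static u64 example_total_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 stats;

	dev_get_stats(dev, &stats);
	return stats.rx_packets + stats.tx_packets;
}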

struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
	struct netdev_queue *queue = dev_ingress_queue(dev);

#ifdef CONFIG_NET_CLS_ACT
	if (queue)
		return queue;
	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		return NULL;
	netdev_init_one_queue(dev, queue, NULL);
	queue->qdisc = &noop_qdisc;
	queue->qdisc_sleeping = &noop_qdisc;
	rcu_assign_pointer(dev->ingress_queue, queue);
#endif
	return queue;
}

static const struct ethtool_ops default_ethtool_ops;

void netdev_set_default_ethtool_ops(struct net_device *dev,
				    const struct ethtool_ops *ops)
{
	if (dev->ethtool_ops == &default_ethtool_ops)
		dev->ethtool_ops = ops;
}
EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);

void netdev_freemem(struct net_device *dev)
{
	char *addr = (char *)dev - dev->padded;

	kvfree(addr);
}

/**
 * alloc_netdev_mqs - allocate network device
 * @sizeof_priv: size of private data to allocate space for
 * @name: device name format string
 * @name_assign_type: origin of device name
 * @setup: callback to initialize device
 * @txqs: the number of TX subqueues to allocate
 * @rxqs: the number of RX subqueues to allocate
 *
 * Allocates a struct net_device with private data area for driver use
 * and performs basic initialization. Also allocates subqueue structs
 * for each queue on the device.
 */
struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
		unsigned char name_assign_type,
		void (*setup)(struct net_device *),
		unsigned int txqs, unsigned int rxqs)
{
	struct net_device *dev;
	size_t alloc_size;
	struct net_device *p;

	BUG_ON(strlen(name) >= sizeof(dev->name));

	if (txqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
		return NULL;
	}

#ifdef CONFIG_SYSFS
	if (rxqs < 1) {
		pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
		return NULL;
	}
#endif

	alloc_size = sizeof(struct net_device);
	if (sizeof_priv) {
		/* ensure 32-byte alignment of private area */
		alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* ensure 32-byte alignment of whole construct */
	alloc_size += NETDEV_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!p)
		p = vzalloc(alloc_size);
	if (!p)
		return NULL;

	dev = PTR_ALIGN(p, NETDEV_ALIGN);
	dev->padded = (char *)dev - (char *)p;

	dev->pcpu_refcnt = alloc_percpu(int);
	if (!dev->pcpu_refcnt)
		goto free_dev;

	if (dev_addr_init(dev))
		goto free_pcpu;

	dev_mc_init(dev);
	dev_uc_init(dev);

	dev_net_set(dev, &init_net);

	dev->gso_max_size = GSO_MAX_SIZE;
	dev->gso_max_segs = GSO_MAX_SEGS;
	dev->gso_min_segs = 0;

	INIT_LIST_HEAD(&dev->napi_list);
	INIT_LIST_HEAD(&dev->unreg_list);
	INIT_LIST_HEAD(&dev->close_list);
	INIT_LIST_HEAD(&dev->link_watch_list);
	INIT_LIST_HEAD(&dev->adj_list.upper);
	INIT_LIST_HEAD(&dev->adj_list.lower);
	INIT_LIST_HEAD(&dev->all_adj_list.upper);
	INIT_LIST_HEAD(&dev->all_adj_list.lower);
	dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
	setup(dev);

	dev->num_tx_queues = txqs;
	dev->real_num_tx_queues = txqs;
	if (netif_alloc_netdev_queues(dev))
		goto free_all;

#ifdef CONFIG_SYSFS
	dev->num_rx_queues = rxqs;
	dev->real_num_rx_queues = rxqs;
	if (netif_alloc_rx_queues(dev))
		goto free_all;
#endif

	strcpy(dev->name, name);
	dev->name_assign_type = name_assign_type;
	dev->group = INIT_NETDEV_GROUP;
	if (!dev->ethtool_ops)
		dev->ethtool_ops = &default_ethtool_ops;
	return dev;

free_all:
	free_netdev(dev);
	return NULL;

free_pcpu:
	free_percpu(dev->pcpu_refcnt);
free_dev:
	netdev_freemem(dev);
	return NULL;
}
EXPORT_SYMBOL(alloc_netdev_mqs);
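
/* Usage sketch (illustrative only): allocating a multiqueue Ethernet-style
 * device with eight TX and eight RX queues and no private area. The "mq%d"
 * template with NET_NAME_ENUM is an assumption made for the example.
 */
static struct net_device *example_alloc_mq(void)
{
	return alloc_netdev_mqs(0, "mq%d", NET_NAME_ENUM, ether_setup, 8, 8);
}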

/**
 * free_netdev - free network device
 * @dev: device
 *
 * This function does the last stage of destroying an allocated device
 * interface. The reference to the device object is released.
 * If this is the last reference then it will be freed.
 */
void free_netdev(struct net_device *dev)
{
	struct napi_struct *p, *n;

	release_net(dev_net(dev));

	netif_free_tx_queues(dev);
#ifdef CONFIG_SYSFS
	kfree(dev->_rx);
#endif

	kfree(rcu_dereference_protected(dev->ingress_queue, 1));

	/* Flush device addresses */
	dev_addr_flush(dev);

	list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
		netif_napi_del(p);

	free_percpu(dev->pcpu_refcnt);
	dev->pcpu_refcnt = NULL;

	/* Compatibility with error handling in drivers */
	if (dev->reg_state == NETREG_UNINITIALIZED) {
		netdev_freemem(dev);
		return;
	}

	BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
	dev->reg_state = NETREG_RELEASED;

	/* will free via device release */
	put_device(&dev->dev);
}
EXPORT_SYMBOL(free_netdev);

/**
 * synchronize_net - Synchronize with packet receive processing
 *
 * Wait for packets currently being received to be done.
 * Does not block later packets from starting.
 */
void synchronize_net(void)
{
	might_sleep();
	if (rtnl_is_locked())
		synchronize_rcu_expedited();
	else
		synchronize_rcu();
}
EXPORT_SYMBOL(synchronize_net);

/**
 * unregister_netdevice_queue - remove device from the kernel
 * @dev: device
 * @head: list
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 * If @head is not NULL, the device is queued to be unregistered later.
 *
 * Callers must hold the rtnl semaphore. You may want
 * unregister_netdev() instead of this.
 */
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
{
	ASSERT_RTNL();

	if (head) {
		list_move_tail(&dev->unreg_list, head);
	} else {
		rollback_registered(dev);
		/* Finish processing unregister after unlock */
		net_set_todo(dev);
	}
}
EXPORT_SYMBOL(unregister_netdevice_queue);

/**
 * unregister_netdevice_many - unregister many devices
 * @head: list of devices
 *
 * Note: As most callers use a stack allocated list_head,
 * we force a list_del() to make sure the stack won't be corrupted later.
 */
void unregister_netdevice_many(struct list_head *head)
{
	struct net_device *dev;

	if (!list_empty(head)) {
		rollback_registered_many(head);
		list_for_each_entry(dev, head, unreg_list)
			net_set_todo(dev);
		list_del(head);
	}
}
EXPORT_SYMBOL(unregister_netdevice_many);
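
/* Usage sketch (illustrative only): batching removals so the
 * synchronize_net() calls in rollback_registered_many() are paid once for
 * the whole group instead of once per device.
 */
static void example_remove_group(struct net_device *devs[], int n)
{
	LIST_HEAD(head);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &head);
	unregister_netdevice_many(&head);	/* also empties &head */
	rtnl_unlock();
}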

/**
 * unregister_netdev - remove device from the kernel
 * @dev: device
 *
 * This function shuts down a device interface and removes it
 * from the kernel tables.
 *
 * This is just a wrapper for unregister_netdevice that takes
 * the rtnl semaphore. In general you want to use this and not
 * unregister_netdevice.
 */
void unregister_netdev(struct net_device *dev)
{
	rtnl_lock();
	unregister_netdevice(dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(unregister_netdev);

/**
 * dev_change_net_namespace - move device to different nethost namespace
 * @dev: device
 * @net: network namespace
 * @pat: If not NULL, name pattern to try if the current device name
 *       is already taken in the destination network namespace.
 *
 * This function shuts down a device interface and moves it
 * to a new network namespace. On success 0 is returned, on
 * a failure a negative errno code is returned.
 *
 * Callers must hold the rtnl semaphore.
 */
int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
{
	int err;

	ASSERT_RTNL();

	/* Don't allow namespace local devices to be moved. */
	err = -EINVAL;
	if (dev->features & NETIF_F_NETNS_LOCAL)
		goto out;

	/* Ensure the device has been registered */
	if (dev->reg_state != NETREG_REGISTERED)
		goto out;

	/* Get out if there is nothing to do */
	err = 0;
	if (net_eq(dev_net(dev), net))
		goto out;

	/* Pick the destination device name, and ensure
	 * we can use it in the destination network namespace.
	 */
	err = -EEXIST;
	if (__dev_get_by_name(net, dev->name)) {
		/* We get here if we can't use the current device name */
		if (!pat)
			goto out;
		if (dev_get_valid_name(net, dev, pat) < 0)
			goto out;
	}

	/*
	 * And now a mini version of register_netdevice unregister_netdevice.
	 */

	/* If device is running close it first. */
	dev_close(dev);

	/* And unlink it from device chain */
	err = -ENODEV;
	unlist_netdevice(dev);

	synchronize_net();

	/* Shutdown queueing discipline. */
	dev_shutdown(dev);

	/* Notify protocols that we are about to destroy
	 * this device. They should clean all the things.
	 *
	 * Note that dev->reg_state stays at NETREG_REGISTERED.
	 * This is wanted because this way 8021q and macvlan know
	 * the device is just moving and can keep their slaves up.
	 */
	call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
	rcu_barrier();
	call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
	rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);

	/*
	 * Flush the unicast and multicast chains
	 */
	dev_uc_flush(dev);
	dev_mc_flush(dev);

	/* Send a netdev-removed uevent to the old namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
	netdev_adjacent_del_links(dev);

	/* Actually switch the network namespace */
	dev_net_set(dev, net);

	/* If there is an ifindex conflict assign a new one */
	if (__dev_get_by_index(net, dev->ifindex)) {
		int iflink = (dev->iflink == dev->ifindex);
		dev->ifindex = dev_new_index(net);
		if (iflink)
			dev->iflink = dev->ifindex;
	}

	/* Send a netdev-add uevent to the new namespace */
	kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
	netdev_adjacent_add_links(dev);

	/* Fixup kobjects */
	err = device_rename(&dev->dev, dev->name);
	WARN_ON(err);

	/* Add the device back in the hashes */
	list_netdevice(dev);

	/* Notify protocols, that a new device appeared. */
	call_netdevice_notifiers(NETDEV_REGISTER, dev);

	/*
	 * Prevent userspace races by waiting until the network
	 * device is fully setup before sending notifications.
	 */
	rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);

	synchronize_net();
	err = 0;
out:
	return err;
}
EXPORT_SYMBOL_GPL(dev_change_net_namespace);

static int dev_cpu_callback(struct notifier_block *nfb,
			    unsigned long action,
			    void *ocpu)
{
	struct sk_buff **list_skb;
	struct sk_buff *skb;
	unsigned int cpu, oldcpu = (unsigned long)ocpu;
	struct softnet_data *sd, *oldsd;

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	local_irq_disable();
	cpu = smp_processor_id();
	sd = &per_cpu(softnet_data, cpu);
	oldsd = &per_cpu(softnet_data, oldcpu);

	/* Find end of our completion_queue. */
	list_skb = &sd->completion_queue;
	while (*list_skb)
		list_skb = &(*list_skb)->next;
	/* Append completion queue from offline CPU. */
	*list_skb = oldsd->completion_queue;
	oldsd->completion_queue = NULL;

	/* Append output queue from offline CPU. */
	if (oldsd->output_queue) {
		*sd->output_queue_tailp = oldsd->output_queue;
		sd->output_queue_tailp = oldsd->output_queue_tailp;
		oldsd->output_queue = NULL;
		oldsd->output_queue_tailp = &oldsd->output_queue;
	}
	/* Append NAPI poll list from offline CPU. */
	if (!list_empty(&oldsd->poll_list)) {
		list_splice_init(&oldsd->poll_list, &sd->poll_list);
		raise_softirq_irqoff(NET_RX_SOFTIRQ);
	}

	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_enable();

	/* Process offline CPU's input_pkt_queue */
	while ((skb = __skb_dequeue(&oldsd->process_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}
	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
		netif_rx_internal(skb);
		input_queue_head_incr(oldsd);
	}

	return NOTIFY_OK;
}
1da177e4
LT
7018
7019
/**
 *	netdev_increment_features - increment feature set by one
 *	@all: current feature set
 *	@one: new feature set
 *	@mask: mask feature set
 *
 *	Computes a new feature set after adding a device with feature set
 *	@one to the master device with current feature set @all.  Will not
 *	enable anything that is off in @mask. Returns the new feature set.
 */
netdev_features_t netdev_increment_features(netdev_features_t all,
	netdev_features_t one, netdev_features_t mask)
{
	if (mask & NETIF_F_GEN_CSUM)
		mask |= NETIF_F_ALL_CSUM;
	mask |= NETIF_F_VLAN_CHALLENGED;

	all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
	all &= one | ~NETIF_F_ALL_FOR_ALL;

	/* If one device supports hw checksumming, set for all. */
	if (all & NETIF_F_GEN_CSUM)
		all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);

	return all;
}
EXPORT_SYMBOL(netdev_increment_features);
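
/*
 * Illustrative sketch (not code from this file): a master driver such
 * as bonding or bridging would fold each slave's features into the
 * master's, roughly
 *
 *	netdev_features_t features = initial_set;	// driver-chosen seed
 *
 *	for each slave:
 *		features = netdev_increment_features(features,
 *						     slave->features,
 *						     mask);
 *
 * so NETIF_F_ONE_FOR_ALL bits are enabled as soon as any slave has
 * them, while NETIF_F_ALL_FOR_ALL bits survive only if every slave
 * has them.  "initial_set" and the loop shape are assumptions for the
 * example; see the individual drivers for the real callers.
 */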

static struct hlist_head * __net_init netdev_create_hash(void)
{
	int i;
	struct hlist_head *hash;

	hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
	if (hash != NULL)
		for (i = 0; i < NETDEV_HASHENTRIES; i++)
			INIT_HLIST_HEAD(&hash[i]);

	return hash;
}

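/*
 * Each namespace gets two such tables: dev_name_head, hashed by
 * interface name, and dev_index_head, hashed by ifindex.  These back
 * the __dev_get_by_name() and __dev_get_by_index() lookups used
 * throughout this file.
 */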
/* Initialize per network namespace state */
static int __net_init netdev_init(struct net *net)
{
	if (net != &init_net)
		INIT_LIST_HEAD(&net->dev_base_head);

	net->dev_name_head = netdev_create_hash();
	if (net->dev_name_head == NULL)
		goto err_name;

	net->dev_index_head = netdev_create_hash();
	if (net->dev_index_head == NULL)
		goto err_idx;

	return 0;

err_idx:
	kfree(net->dev_name_head);
err_name:
	return -ENOMEM;
}

/**
 *	netdev_drivername - network driver for the device
 *	@dev: network device
 *
 *	Determine network driver for device.
 */
const char *netdev_drivername(const struct net_device *dev)
{
	const struct device_driver *driver;
	const struct device *parent;
	const char *empty = "";

	parent = dev->dev.parent;
	if (!parent)
		return empty;

	driver = parent->driver;
	if (driver && driver->name)
		return driver->name;
	return empty;
}

static void __netdev_printk(const char *level, const struct net_device *dev,
			    struct va_format *vaf)
{
	if (dev && dev->dev.parent) {
		dev_printk_emit(level[1] - '0',
				dev->dev.parent,
				"%s %s %s%s: %pV",
				dev_driver_string(dev->dev.parent),
				dev_name(dev->dev.parent),
				netdev_name(dev), netdev_reg_state(dev),
				vaf);
	} else if (dev) {
		printk("%s%s%s: %pV",
		       level, netdev_name(dev), netdev_reg_state(dev), vaf);
	} else {
		printk("%s(NULL net_device): %pV", level, vaf);
	}
}

void netdev_printk(const char *level, const struct net_device *dev,
		   const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__netdev_printk(level, dev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(netdev_printk);

#define define_netdev_printk_level(func, level)			\
void func(const struct net_device *dev, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
								\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
								\
	__netdev_printk(level, dev, &vaf);			\
								\
	va_end(args);						\
}								\
EXPORT_SYMBOL(func);

define_netdev_printk_level(netdev_emerg, KERN_EMERG);
define_netdev_printk_level(netdev_alert, KERN_ALERT);
define_netdev_printk_level(netdev_crit, KERN_CRIT);
define_netdev_printk_level(netdev_err, KERN_ERR);
define_netdev_printk_level(netdev_warn, KERN_WARNING);
define_netdev_printk_level(netdev_notice, KERN_NOTICE);
define_netdev_printk_level(netdev_info, KERN_INFO);
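
/*
 * Example: netdev_err(dev, "link is down\n") on a PCI NIC would print
 * something like "e1000e 0000:00:19.0 eth2: link is down", with the
 * driver name, bus address and interface name supplied by
 * __netdev_printk() above.  The driver/bus strings here are an
 * illustrative assumption; the exact prefix depends on dev's parent.
 */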

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};

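/*
 * Called when a network namespace is torn down: any device still
 * living there that is not NETIF_F_NETNS_LOCAL and has no rtnl_link
 * cleanup of its own is pushed back to init_net under a fallback
 * name, "dev%d" by ifindex, since its original name may already be
 * taken there.
 */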
static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}

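/*
 * Note the wait-loop shape below: we cannot sleep while holding the
 * RTNL, so each pass takes rtnl_lock(), checks every namespace's
 * dev_unreg_count, and drops the lock via __rtnl_unlock() before
 * schedule() if anything is still unregistering.  Waiters are woken
 * through netdev_unregistering_wq.
 */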
static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&netdev_unregistering_wq, &wait,
				TASK_UNINTERRUPTIBLE);
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();
		schedule();
	}
	finish_wait(&netdev_unregistering_wq, &wait);
}

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}
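
/*
 * Queueing everything on dev_kill_list and tearing it down in one
 * unregister_netdevice_many() call lets the expensive RCU grace
 * periods be shared across all the devices instead of being paid
 * once per device.
 */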

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 *
 */

/*
 *	This is called single threaded during boot, so no need
 *	to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 *	Initialise the packet receive queues.
	 */

	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too.  Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device as the first device on the list
	 * of network devices, so that it is the first device that
	 * appears and the last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}

subsys_initcall(net_dev_init);
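
/*
 * net_dev_init() runs as a subsys_initcall, i.e. before device-level
 * initcalls, so the per-CPU softnet state and the NET_TX/NET_RX
 * softirq handlers are in place before any driver can register a
 * net_device.
 */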