/*
 *	NET3	Protocol independent device support routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *	Derived from the non IP parts of dev.c 1.0.19
 *		Authors:	Ross Biro
 *				Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *				Mark Evans, <evansmp@uhura.aston.ac.uk>
 *
 *	Additional Authors:
 *		Florian la Roche <rzsfl@rz.uni-sb.de>
 *		Alan Cox <gw4pts@gw4pts.ampr.org>
 *		David Hinds <dahinds@users.sourceforge.net>
 *		Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *		Adam Sulmicki <adam@cfar.umd.edu>
 *		Pekka Riikonen <priikone@poesidon.pspt.fi>
 *
 *	Changes:
 *		D.J. Barrow	:	Fixed bug where dev->refcnt gets set
 *					to 2 if register_netdev gets called
 *					before net_dev_init & also removed a
 *					few lines of code in the process.
 *		Alan Cox	:	device private ioctl copies fields back.
 *		Alan Cox	:	Transmit queue code does relevant
 *					stunts to keep the queue safe.
 *		Alan Cox	:	Fixed double lock.
 *		Alan Cox	:	Fixed promisc NULL pointer trap
 *		????????	:	Support the full private ioctl range
 *		Alan Cox	:	Moved ioctl permission check into
 *					drivers
 *		Tim Kordas	:	SIOCADDMULTI/SIOCDELMULTI
 *		Alan Cox	:	100 backlog just doesn't cut it when
 *					you start doing multicast video 8)
 *		Alan Cox	:	Rewrote net_bh and list manager.
 *		Alan Cox	:	Fix ETH_P_ALL echoback lengths.
 *		Alan Cox	:	Took out transmit every packet pass
 *					Saved a few bytes in the ioctl handler
 *		Alan Cox	:	Network driver sets packet type before
 *					calling netif_rx. Saves a function
 *					call a packet.
 *		Alan Cox	:	Hashed net_bh()
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Wrong field in SIOCGIFDSTADDR
 *		Alan Cox	:	Device lock protection.
 *		Alan Cox	:	Fixed nasty side effect of device close
 *					changes.
 *		Rudi Cilibrasi	:	Pass the right thing to
 *					set_mac_address()
 *		Dave Miller	:	32bit quantity for the device lock to
 *					make it work out on a Sparc.
 *		Bjorn Ekwall	:	Added KERNELD hack.
 *		Alan Cox	:	Cleaned up the backlog initialise.
 *		Craig Metz	:	SIOCGIFCONF fix if space for under
 *					1 device.
 *		Thomas Bogendoerfer :	Return ENODEV for dev_open, if there
 *					is no device open function.
 *		Andi Kleen	:	Fix error reporting for SIOCGIFCONF
 *		Michael Chastain:	Fix signed/unsigned for SIOCGIFCONF
 *		Cyrus Durgin	:	Cleaned for KMOD
 *		Adam Sulmicki	:	Bug Fix : Network Device Unload
 *					A network device unload needs to purge
 *					the backlog queue.
 *		Paul Rusty Russell	:	SIOCSIFNAME
 *		Pekka Riikonen	:	Netdev boot-time settings code
 *		Andrew Morton	:	Make unregister_netdevice wait
 *					indefinitely on dev->refcnt
 *		J Hadi Salim	:	- Backlog queue sampling
 *					- netif_rx() feedback
 */

#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/notifier.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netpoll.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>
#include <net/iw_handler.h>
#include <asm/current.h>
#include <linux/audit.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <trace/events/napi.h>
#include <trace/events/net.h>
#include <trace/events/skb.h>
#include <linux/pci.h>
#include <linux/inetdevice.h>
#include <linux/cpu_rmap.h>
#include <linux/static_key.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>
#include <linux/if_macvlan.h>

#include "net-sysfs.h"

/* Instead of increasing this, you should create a hash table. */
#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(ptype_lock);
static DEFINE_SPINLOCK(offload_lock);
struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
struct list_head ptype_all __read_mostly;	/* Taps */
static struct list_head offload_base __read_mostly;

static int netif_rx_internal(struct sk_buff *skb);

/*
 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
 * semaphore.
 *
 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
 *
 * Writers must hold the rtnl semaphore while they loop through the
 * dev_base_head list, and hold dev_base_lock for writing when they do the
 * actual updates.  This allows pure readers to access the list even
 * while a writer is preparing to update it.
 *
 * To put it another way, dev_base_lock is held for writing only to
 * protect against pure readers; the rtnl semaphore provides the
 * protection against other writers.
 *
 * See, for example usages, register_netdevice() and
 * unregister_netdevice(), which must be called with the rtnl
 * semaphore held.
 */
DEFINE_RWLOCK(dev_base_lock);
EXPORT_SYMBOL(dev_base_lock);

/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);

static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);

static seqcount_t devnet_rename_seq;

static inline void dev_base_seq_inc(struct net *net)
{
	while (++net->dev_base_seq == 0);
}

static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
{
	unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));

	return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
}

static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
{
	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
}

static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_lock(&sd->input_pkt_queue.lock);
#endif
}

static inline void rps_unlock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	spin_unlock(&sd->input_pkt_queue.lock);
#endif
}

/* Device list insertion */
static void list_netdevice(struct net_device *dev)
{
	struct net *net = dev_net(dev);

	ASSERT_RTNL();

	write_lock_bh(&dev_base_lock);
	list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	hlist_add_head_rcu(&dev->index_hlist,
			   dev_index_hash(net, dev->ifindex));
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(net);
}

/* Device list removal
 * caller must respect a RCU grace period before freeing/reusing dev
 */
static void unlist_netdevice(struct net_device *dev)
{
	ASSERT_RTNL();

	/* Unlink dev from the device chain */
	write_lock_bh(&dev_base_lock);
	list_del_rcu(&dev->dev_list);
	hlist_del_rcu(&dev->name_hlist);
	hlist_del_rcu(&dev->index_hlist);
	write_unlock_bh(&dev_base_lock);

	dev_base_seq_inc(dev_net(dev));
}

/*
 *	Our notifier list
 */

static RAW_NOTIFIER_HEAD(netdev_chain);

/*
 *	Device drivers call our routines to queue packets here. We empty the
 *	queue in the local softnet handler.
 */

DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

#ifdef CONFIG_LOCKDEP
/*
 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
 * according to dev->type
 */
static const unsigned short netdev_lock_type[] =
	{ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
	 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
	 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
	 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
	 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
	 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
	 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
	 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
	 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
	 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
	 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
	 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
	 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
	 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
	 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};

static const char *const netdev_lock_name[] =
	{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
	 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
	 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
	 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
	 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
	 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
	 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
	 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
	 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
	 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
	 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
	 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
	 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
	 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
	 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};

static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];

static inline unsigned short netdev_lock_pos(unsigned short dev_type)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
		if (netdev_lock_type[i] == dev_type)
			return i;
	/* the last key is used by default */
	return ARRAY_SIZE(netdev_lock_type) - 1;
}

static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
	int i;

	i = netdev_lock_pos(dev_type);
	lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
				   netdev_lock_name[i]);
}

static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
	int i;

	i = netdev_lock_pos(dev->type);
	lockdep_set_class_and_name(&dev->addr_list_lock,
				   &netdev_addr_lock_key[i],
				   netdev_lock_name[i]);
}
#else
static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
						 unsigned short dev_type)
{
}
static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
{
}
#endif

/*******************************************************************************

		Protocol management and registration routines

*******************************************************************************/

/*
 *	Add a protocol ID to the list. Now that the input handler is
 *	smarter we can dispense with all the messy stuff that used to be
 *	here.
 *
 *	BEWARE!!! Protocol handlers, mangling input packets,
 *	MUST BE last in hash buckets and checking protocol handlers
 *	MUST start from promiscuous ptype_all chain in net_bh.
 *	It is true now, do not change it.
 *	Explanation follows: if protocol handler, mangling packet, will
 *	be the first on list, it is not able to sense, that packet
 *	is cloned and should be copied-on-write, so that it will
 *	change it and subsequent readers will get broken packet.
 *							--ANK (980803)
 */

static inline struct list_head *ptype_head(const struct packet_type *pt)
{
	if (pt->type == htons(ETH_P_ALL))
		return &ptype_all;
	else
		return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
}

/**
 *	dev_add_pack - add packet handler
 *	@pt: packet type declaration
 *
 *	Add a protocol handler to the networking stack. The passed &packet_type
 *	is linked into kernel lists and may not be freed until it has been
 *	removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new packet type (until the next received packet).
 */

void dev_add_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);

	spin_lock(&ptype_lock);
	list_add_rcu(&pt->list, head);
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(dev_add_pack);

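/* Example (illustrative, not part of this file): IPv4 registers its
 * receive handler through dev_add_pack(); see net/ipv4/af_inet.c for
 * the real thing. A minimal sketch:
 *
 *	static struct packet_type ip_packet_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.func = ip_rcv,
 *	};
 *
 *	dev_add_pack(&ip_packet_type);
 */
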
/**
 *	__dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
void __dev_remove_pack(struct packet_type *pt)
{
	struct list_head *head = ptype_head(pt);
	struct packet_type *pt1;

	spin_lock(&ptype_lock);

	list_for_each_entry(pt1, head, list) {
		if (pt == pt1) {
			list_del_rcu(&pt->list);
			goto out;
		}
	}

	pr_warn("dev_remove_pack: %p not found\n", pt);
out:
	spin_unlock(&ptype_lock);
}
EXPORT_SYMBOL(__dev_remove_pack);

/**
 *	dev_remove_pack - remove packet handler
 *	@pt: packet type declaration
 *
 *	Remove a protocol handler that was previously added to the kernel
 *	protocol handlers by dev_add_pack(). The passed &packet_type is removed
 *	from the kernel lists and can be freed or reused once this function
 *	returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_pack(struct packet_type *pt)
{
	__dev_remove_pack(pt);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_pack);

/**
 *	dev_add_offload - register offload handlers
 *	@po: protocol offload declaration
 *
 *	Add protocol offload handlers to the networking stack. The passed
 *	&proto_offload is linked into kernel lists and may not be freed until
 *	it has been removed from the kernel lists.
 *
 *	This call does not sleep therefore it can not
 *	guarantee all CPU's that are in middle of receiving packets
 *	will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;

	spin_lock(&offload_lock);
	list_add_rcu(&po->list, head);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 *	__dev_remove_offload - remove offload handler
 *	@po: packet offload declaration
 *
 *	Remove a protocol offload handler that was previously added to the
 *	kernel offload handlers by dev_add_offload(). The passed &offload_type
 *	is removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	The packet type might still be in use by receivers
 *	and must not be freed until after all the CPU's have gone
 *	through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 *	dev_remove_offload - remove packet offload handler
 *	@po: packet offload declaration
 *
 *	Remove a packet offload handler that was previously added to the kernel
 *	offload handlers by dev_add_offload(). The passed &offload_type is
 *	removed from the kernel lists and can be freed or reused once this
 *	function returns.
 *
 *	This call sleeps to guarantee that no CPU is looking at the packet
 *	type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);

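/* Example (illustrative): a protocol hooks its GRO/GSO callbacks up with
 * dev_add_offload(). IPv4 does roughly the following in net/ipv4/af_inet.c
 * (callback names taken from that file):
 *
 *	static struct packet_offload ip_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = inet_gso_segment,
 *			.gro_receive = inet_gro_receive,
 *			.gro_complete = inet_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&ip_packet_offload);
 */
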
/******************************************************************************

		      Device Boot-time Settings Routines

*******************************************************************************/

/* Boot time configuration table */
static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];

/**
 *	netdev_boot_setup_add - add new setup entry
 *	@name: name of the device
 *	@map: configured settings for the device
 *
 *	Adds new setup entry to the dev_boot_setup list.  The function
 *	returns 0 on error and 1 on success.  This is a generic routine to
 *	all netdevices.
 */
static int netdev_boot_setup_add(char *name, struct ifmap *map)
{
	struct netdev_boot_setup *s;
	int i;

	s = dev_boot_setup;
	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
			memset(s[i].name, 0, sizeof(s[i].name));
			strlcpy(s[i].name, name, IFNAMSIZ);
			memcpy(&s[i].map, map, sizeof(s[i].map));
			break;
		}
	}

	return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
}

/**
 *	netdev_boot_setup_check - check boot time settings
 *	@dev: the netdevice
 *
 *	Check boot time settings for the device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found, 1 if they are.
 */
int netdev_boot_setup_check(struct net_device *dev)
{
	struct netdev_boot_setup *s = dev_boot_setup;
	int i;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
		if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
		    !strcmp(dev->name, s[i].name)) {
			dev->irq	= s[i].map.irq;
			dev->base_addr	= s[i].map.base_addr;
			dev->mem_start	= s[i].map.mem_start;
			dev->mem_end	= s[i].map.mem_end;
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(netdev_boot_setup_check);


/**
 *	netdev_boot_base - get address from boot time settings
 *	@prefix: prefix for network device
 *	@unit: id for network device
 *
 *	Check boot time settings for the base address of device.
 *	The found settings are set for the device to be used
 *	later in the device probing.
 *	Returns 0 if no settings found.
 */
unsigned long netdev_boot_base(const char *prefix, int unit)
{
	const struct netdev_boot_setup *s = dev_boot_setup;
	char name[IFNAMSIZ];
	int i;

	sprintf(name, "%s%d", prefix, unit);

	/*
	 * If device already registered then return base of 1
	 * to indicate not to probe for this interface
	 */
	if (__dev_get_by_name(&init_net, name))
		return 1;

	for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
		if (!strcmp(name, s[i].name))
			return s[i].map.base_addr;
	return 0;
}

/*
 * Saves at boot time configured settings for any netdevice.
 */
int __init netdev_boot_setup(char *str)
{
	int ints[5];
	struct ifmap map;

	str = get_options(str, ARRAY_SIZE(ints), ints);
	if (!str || !*str)
		return 0;

	/* Save settings */
	memset(&map, 0, sizeof(map));
	if (ints[0] > 0)
		map.irq = ints[1];
	if (ints[0] > 1)
		map.base_addr = ints[2];
	if (ints[0] > 2)
		map.mem_start = ints[3];
	if (ints[0] > 3)
		map.mem_end = ints[4];

	/* Add new entry to the list */
	return netdev_boot_setup_add(str, &map);
}

__setup("netdev=", netdev_boot_setup);

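/* Example (illustrative): booting with
 *
 *	netdev=9,0x300,eth0
 *
 * records irq 9 and I/O base 0x300 for "eth0" in dev_boot_setup; a probing
 * driver later picks the values up via netdev_boot_setup_check().
 */
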
/*******************************************************************************

			    Device Interface Subroutines

*******************************************************************************/

/**
 *	__dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. Must be called under RTNL semaphore
 *	or @dev_base_lock. If the name is found a pointer to the device
 *	is returned. If the name is not found then %NULL is returned. The
 *	reference counters are not incremented so the caller must be
 *	careful with locks.
 */

struct net_device *__dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_name);

/**
 *	dev_get_by_name_rcu - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name.
 *	If the name is found a pointer to the device is returned.
 *	If the name is not found then %NULL is returned.
 *	The reference counters are not incremented so the caller must be
 *	careful with locks. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
{
	struct net_device *dev;
	struct hlist_head *head = dev_name_hash(net, name);

	hlist_for_each_entry_rcu(dev, head, name_hlist)
		if (!strncmp(dev->name, name, IFNAMSIZ))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_name_rcu);

/**
 *	dev_get_by_name - find a device by its name
 *	@net: the applicable net namespace
 *	@name: name to find
 *
 *	Find an interface by name. This can be called from any
 *	context and does its own locking. The returned handle has
 *	the usage count incremented and the caller must use dev_put() to
 *	release it when it is no longer needed. %NULL is returned if no
 *	matching device is found.
 */

struct net_device *dev_get_by_name(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_name);
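
/* Example (illustrative): the usual lockless lookup pattern. No reference
 * is taken, so the device may only be used inside the RCU section:
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		... use dev here, do not dev_put() ...
 *	rcu_read_unlock();
 */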

/**
 *	__dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold either the RTNL semaphore
 *	or @dev_base_lock.
 */

struct net_device *__dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_get_by_index);

/**
 *	dev_get_by_index_rcu - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns %NULL if the device
 *	is not found or a pointer to the device. The device has not
 *	had its reference counter increased so the caller must be careful
 *	about locking. The caller must hold RCU lock.
 */

struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;
	struct hlist_head *head = dev_index_hash(net, ifindex);

	hlist_for_each_entry_rcu(dev, head, index_hlist)
		if (dev->ifindex == ifindex)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_get_by_index_rcu);


/**
 *	dev_get_by_index - find a device by its ifindex
 *	@net: the applicable net namespace
 *	@ifindex: index of device
 *
 *	Search for an interface by index. Returns NULL if the device
 *	is not found or a pointer to the device. The device returned has
 *	had a reference added and the pointer is safe until the user calls
 *	dev_put to indicate they have finished with it.
 */

struct net_device *dev_get_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		dev_hold(dev);
	rcu_read_unlock();
	return dev;
}
EXPORT_SYMBOL(dev_get_by_index);

/**
 *	netdev_get_name - get a netdevice name, knowing its ifindex.
 *	@net: network namespace
 *	@name: a pointer to the buffer where the name will be stored.
 *	@ifindex: the ifindex of the interface to get the name from.
 *
 *	The use of raw_seqcount_begin() and cond_resched() before
 *	retrying is required as we want to give the writers a chance
 *	to complete when CONFIG_PREEMPT is not set.
 */
int netdev_get_name(struct net *net, char *name, int ifindex)
{
	struct net_device *dev;
	unsigned int seq;

retry:
	seq = raw_seqcount_begin(&devnet_rename_seq);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, ifindex);
	if (!dev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	strcpy(name, dev->name);
	rcu_read_unlock();
	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
		cond_resched();
		goto retry;
	}

	return 0;
}

/**
 *	dev_getbyhwaddr_rcu - find a device by its hardware address
 *	@net: the applicable net namespace
 *	@type: media type of device
 *	@ha: hardware address
 *
 *	Search for an interface by MAC address. Returns NULL if the device
 *	is not found or a pointer to the device.
 *	The caller must hold RCU or RTNL.
 *	The returned device has not had its ref count increased
 *	and the caller must therefore be careful about locking
 *
 */

struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
				       const char *ha)
{
	struct net_device *dev;

	for_each_netdev_rcu(net, dev)
		if (dev->type == type &&
		    !memcmp(dev->dev_addr, ha, dev->addr_len))
			return dev;

	return NULL;
}
EXPORT_SYMBOL(dev_getbyhwaddr_rcu);

struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev;

	ASSERT_RTNL();
	for_each_netdev(net, dev)
		if (dev->type == type)
			return dev;

	return NULL;
}
EXPORT_SYMBOL(__dev_getfirstbyhwtype);

struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
{
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(net, dev)
		if (dev->type == type) {
			dev_hold(dev);
			ret = dev;
			break;
		}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(dev_getfirstbyhwtype);

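/* Example (illustrative): ARP uses this kind of lookup to map a hardware
 * address back to a device, under rcu_read_lock():
 *
 *	dev = dev_getbyhwaddr_rcu(net, ARPHRD_ETHER, mac);
 *
 * where "mac" points at dev->addr_len bytes of hardware address.
 */
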
/**
 *	dev_get_by_flags_rcu - find any device with given flags
 *	@net: the applicable net namespace
 *	@if_flags: IFF_* values
 *	@mask: bitmask of bits in if_flags to check
 *
 *	Search for any interface with the given flags. Returns NULL if a device
 *	is not found or a pointer to the device. Must be called inside
 *	rcu_read_lock(), and result refcount is unchanged.
 */

struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
					unsigned short mask)
{
	struct net_device *dev, *ret;

	ret = NULL;
	for_each_netdev_rcu(net, dev) {
		if (((dev->flags ^ if_flags) & mask) == 0) {
			ret = dev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(dev_get_by_flags_rcu);

/**
 *	dev_valid_name - check if name is okay for network device
 *	@name: name string
 *
 *	Network device names need to be valid file names to
 *	allow sysfs to work.  We also disallow any kind of
 *	whitespace.
 */
bool dev_valid_name(const char *name)
{
	if (*name == '\0')
		return false;
	if (strlen(name) >= IFNAMSIZ)
		return false;
	if (!strcmp(name, ".") || !strcmp(name, ".."))
		return false;

	while (*name) {
		if (*name == '/' || isspace(*name))
			return false;
		name++;
	}
	return true;
}
EXPORT_SYMBOL(dev_valid_name);
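
/* Example (illustrative): "eth0" and "veth-a1" are accepted; "", ".",
 * "..", "a/b" and names containing whitespace (or >= IFNAMSIZ chars) are
 * rejected, since each name becomes a sysfs directory entry.
 */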

/**
 *	__dev_alloc_name - allocate a name for a device
 *	@net: network namespace to allocate the device name in
 *	@name: name format string
 *	@buf:  scratch buffer and result name string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

static int __dev_alloc_name(struct net *net, const char *name, char *buf)
{
	int i = 0;
	const char *p;
	const int max_netdevices = 8*PAGE_SIZE;
	unsigned long *inuse;
	struct net_device *d;

	p = strnchr(name, IFNAMSIZ-1, '%');
	if (p) {
		/*
		 * Verify the string as this thing may have come from
		 * the user.  There must be either one "%d" and no other "%"
		 * characters.
		 */
		if (p[1] != 'd' || strchr(p + 2, '%'))
			return -EINVAL;

		/* Use one page as a bit array of possible slots */
		inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
		if (!inuse)
			return -ENOMEM;

		for_each_netdev(net, d) {
			if (!sscanf(d->name, name, &i))
				continue;
			if (i < 0 || i >= max_netdevices)
				continue;

			/* avoid cases where sscanf is not exact inverse of printf */
			snprintf(buf, IFNAMSIZ, name, i);
			if (!strncmp(buf, d->name, IFNAMSIZ))
				set_bit(i, inuse);
		}

		i = find_first_zero_bit(inuse, max_netdevices);
		free_page((unsigned long) inuse);
	}

	if (buf != name)
		snprintf(buf, IFNAMSIZ, name, i);
	if (!__dev_get_by_name(net, buf))
		return i;

	/* It is possible to run out of possible slots
	 * when the name is long and there isn't enough space left
	 * for the digits, or if all bits are used.
	 */
	return -ENFILE;
}

/**
 *	dev_alloc_name - allocate a name for a device
 *	@dev: device
 *	@name: name format string
 *
 *	Passed a format string - eg "lt%d" it will try and find a suitable
 *	id. It scans list of devices to build up a free map, then chooses
 *	the first empty slot. The caller must hold the dev_base or rtnl lock
 *	while allocating the name and adding the device in order to avoid
 *	duplicates.
 *	Limited to bits_per_byte * page size devices (ie 32K on most platforms).
 *	Returns the number of the unit assigned or a negative errno code.
 */

int dev_alloc_name(struct net_device *dev, const char *name)
{
	char buf[IFNAMSIZ];
	struct net *net;
	int ret;

	BUG_ON(!dev_net(dev));
	net = dev_net(dev);
	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}
EXPORT_SYMBOL(dev_alloc_name);

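/* Example (illustrative): a driver wanting conventional ethN naming can
 * request a formatted name before register_netdev():
 *
 *	err = dev_alloc_name(dev, "eth%d");
 *	if (err < 0)
 *		goto out;
 *
 * On success dev->name is e.g. "eth2" and err is the unit number.
 */
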
static int dev_alloc_name_ns(struct net *net,
			     struct net_device *dev,
			     const char *name)
{
	char buf[IFNAMSIZ];
	int ret;

	ret = __dev_alloc_name(net, name, buf);
	if (ret >= 0)
		strlcpy(dev->name, buf, IFNAMSIZ);
	return ret;
}

static int dev_get_valid_name(struct net *net,
			      struct net_device *dev,
			      const char *name)
{
	BUG_ON(!net);

	if (!dev_valid_name(name))
		return -EINVAL;

	if (strchr(name, '%'))
		return dev_alloc_name_ns(net, dev, name);
	else if (__dev_get_by_name(net, name))
		return -EEXIST;
	else if (dev->name != name)
		strlcpy(dev->name, name, IFNAMSIZ);

	return 0;
}

/**
 *	dev_change_name - change name of a device
 *	@dev: device
 *	@newname: name (or format string) must be at least IFNAMSIZ
 *
 *	Change name of a device, can pass format strings "eth%d"
 *	for wildcarding.
 */
int dev_change_name(struct net_device *dev, const char *newname)
{
	char oldname[IFNAMSIZ];
	int err = 0;
	int ret;
	struct net *net;

	ASSERT_RTNL();
	BUG_ON(!dev_net(dev));

	net = dev_net(dev);
	if (dev->flags & IFF_UP)
		return -EBUSY;

	write_seqcount_begin(&devnet_rename_seq);

	if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
		write_seqcount_end(&devnet_rename_seq);
		return 0;
	}

	memcpy(oldname, dev->name, IFNAMSIZ);

	err = dev_get_valid_name(net, dev, newname);
	if (err < 0) {
		write_seqcount_end(&devnet_rename_seq);
		return err;
	}

rollback:
	ret = device_rename(&dev->dev, dev->name);
	if (ret) {
		memcpy(dev->name, oldname, IFNAMSIZ);
		write_seqcount_end(&devnet_rename_seq);
		return ret;
	}

	write_seqcount_end(&devnet_rename_seq);

	netdev_adjacent_rename_links(dev, oldname);

	write_lock_bh(&dev_base_lock);
	hlist_del_rcu(&dev->name_hlist);
	write_unlock_bh(&dev_base_lock);

	synchronize_rcu();

	write_lock_bh(&dev_base_lock);
	hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
	write_unlock_bh(&dev_base_lock);

	ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
	ret = notifier_to_errno(ret);

	if (ret) {
		/* err >= 0 after dev_alloc_name() or stores the first errno */
		if (err >= 0) {
			err = ret;
			write_seqcount_begin(&devnet_rename_seq);
			memcpy(dev->name, oldname, IFNAMSIZ);
			memcpy(oldname, newname, IFNAMSIZ);
			goto rollback;
		} else {
			pr_err("%s: name change rollback failed: %d\n",
			       dev->name, ret);
		}
	}

	return err;
}

/**
 *	dev_set_alias - change ifalias of a device
 *	@dev: device
 *	@alias: name up to IFALIASZ
 *	@len: limit of bytes to copy from info
 *
 *	Set ifalias for a device.
 */
int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
{
	char *new_ifalias;

	ASSERT_RTNL();

	if (len >= IFALIASZ)
		return -EINVAL;

	if (!len) {
		kfree(dev->ifalias);
		dev->ifalias = NULL;
		return 0;
	}

	new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
	if (!new_ifalias)
		return -ENOMEM;
	dev->ifalias = new_ifalias;

	strlcpy(dev->ifalias, alias, len+1);
	return len;
}

/**
 *	netdev_features_change - device changes features
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed features.
 */
void netdev_features_change(struct net_device *dev)
{
	call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
}
EXPORT_SYMBOL(netdev_features_change);

/**
 *	netdev_state_change - device changes state
 *	@dev: device to cause notification
 *
 *	Called to indicate a device has changed state. This function calls
 *	the notifier chains for netdev_chain and sends a NEWLINK message
 *	to the routing socket.
 */
void netdev_state_change(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		call_netdevice_notifiers(NETDEV_CHANGE, dev);
		rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
	}
}
EXPORT_SYMBOL(netdev_state_change);

/**
 *	netdev_notify_peers - notify network peers about existence of @dev
 *	@dev: network device
 *
 *	Generate traffic such that interested network peers are aware of
 *	@dev, such as by generating a gratuitous ARP. This may be used when
 *	a device wants to inform the rest of the network about some sort of
 *	reconfiguration such as a failover event or virtual machine
 *	migration.
 */
void netdev_notify_peers(struct net_device *dev)
{
	rtnl_lock();
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
	rtnl_unlock();
}
EXPORT_SYMBOL(netdev_notify_peers);

static int __dev_open(struct net_device *dev)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int ret;

	ASSERT_RTNL();

	if (!netif_device_present(dev))
		return -ENODEV;

	/* Block netpoll from trying to do any rx path servicing.
	 * If we don't do this there is a chance ndo_poll_controller
	 * or ndo_poll may be running while we open the device
	 */
	netpoll_poll_disable(dev);

	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
	ret = notifier_to_errno(ret);
	if (ret)
		return ret;

	set_bit(__LINK_STATE_START, &dev->state);

	if (ops->ndo_validate_addr)
		ret = ops->ndo_validate_addr(dev);

	if (!ret && ops->ndo_open)
		ret = ops->ndo_open(dev);

	netpoll_poll_enable(dev);

	if (ret)
		clear_bit(__LINK_STATE_START, &dev->state);
	else {
		dev->flags |= IFF_UP;
		net_dmaengine_get();
		dev_set_rx_mode(dev);
		dev_activate(dev);
		add_device_randomness(dev->dev_addr, dev->addr_len);
	}

	return ret;
}

/**
 *	dev_open - prepare an interface for use.
 *	@dev: device to open
 *
 *	Takes a device from down to up state. The device's private open
 *	function is invoked and then the multicast lists are loaded. Finally
 *	the device is moved into the up state and a %NETDEV_UP message is
 *	sent to the netdev notifier chain.
 *
 *	Calling this function on an active interface is a nop. On a failure
 *	a negative errno code is returned.
 */
int dev_open(struct net_device *dev)
{
	int ret;

	if (dev->flags & IFF_UP)
		return 0;

	ret = __dev_open(dev);
	if (ret < 0)
		return ret;

	rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
	call_netdevice_notifiers(NETDEV_UP, dev);

	return ret;
}
EXPORT_SYMBOL(dev_open);

static int __dev_close_many(struct list_head *head)
{
	struct net_device *dev;

	ASSERT_RTNL();
	might_sleep();

	list_for_each_entry(dev, head, close_list) {
		/* Temporarily disable netpoll until the interface is down */
		netpoll_poll_disable(dev);

		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);

		clear_bit(__LINK_STATE_START, &dev->state);

		/* Synchronize to scheduled poll. We cannot touch poll list, it
		 * can be even on different cpu. So just clear netif_running().
		 *
		 * dev->stop() will invoke napi_disable() on all of its
		 * napi_struct instances on this device.
		 */
		smp_mb__after_atomic(); /* Commit netif_running(). */
	}

	dev_deactivate_many(head);

	list_for_each_entry(dev, head, close_list) {
		const struct net_device_ops *ops = dev->netdev_ops;

		/*
		 *	Call the device specific close. This cannot fail.
		 *	Only if device is UP
		 *
		 *	We allow it to be called even after a DETACH hot-plug
		 *	event.
		 */
		if (ops->ndo_stop)
			ops->ndo_stop(dev);

		dev->flags &= ~IFF_UP;
		net_dmaengine_put();
		netpoll_poll_enable(dev);
	}

	return 0;
}

static int __dev_close(struct net_device *dev)
{
	int retval;
	LIST_HEAD(single);

	list_add(&dev->close_list, &single);
	retval = __dev_close_many(&single);
	list_del(&single);

	return retval;
}

static int dev_close_many(struct list_head *head)
{
	struct net_device *dev, *tmp;

	/* Remove the devices that don't need to be closed */
	list_for_each_entry_safe(dev, tmp, head, close_list)
		if (!(dev->flags & IFF_UP))
			list_del_init(&dev->close_list);

	__dev_close_many(head);

	list_for_each_entry_safe(dev, tmp, head, close_list) {
		rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
		call_netdevice_notifiers(NETDEV_DOWN, dev);
		list_del_init(&dev->close_list);
	}

	return 0;
}

/**
 *	dev_close - shutdown an interface.
 *	@dev: device to shutdown
 *
 *	This function moves an active device into down state. A
 *	%NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
 *	is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
 *	chain.
 */
int dev_close(struct net_device *dev)
{
	if (dev->flags & IFF_UP) {
		LIST_HEAD(single);

		list_add(&dev->close_list, &single);
		dev_close_many(&single);
		list_del(&single);
	}
	return 0;
}
EXPORT_SYMBOL(dev_close);


/**
 *	dev_disable_lro - disable Large Receive Offload on a device
 *	@dev: device
 *
 *	Disable Large Receive Offload (LRO) on a net device.  Must be
 *	called under RTNL.  This is needed if received packets may be
 *	forwarded to another interface.
 */
void dev_disable_lro(struct net_device *dev)
{
	/*
	 * If we're trying to disable lro on a vlan device
	 * use the underlying physical device instead
	 */
	if (is_vlan_dev(dev))
		dev = vlan_dev_real_dev(dev);

	/* the same for macvlan devices */
	if (netif_is_macvlan(dev))
		dev = macvlan_dev_real_dev(dev);

	dev->wanted_features &= ~NETIF_F_LRO;
	netdev_update_features(dev);

	if (unlikely(dev->features & NETIF_F_LRO))
		netdev_WARN(dev, "failed to disable LRO!\n");
}
EXPORT_SYMBOL(dev_disable_lro);

static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
				   struct net_device *dev)
{
	struct netdev_notifier_info info;

	netdev_notifier_info_init(&info, dev);
	return nb->notifier_call(nb, val, &info);
}

static int dev_boot_phase = 1;

/**
 *	register_netdevice_notifier - register a network notifier block
 *	@nb: notifier
 *
 *	Register a notifier to be called when network device events occur.
 *	The notifier passed is linked into the kernel structures and must
 *	not be reused until it has been unregistered. A negative errno code
 *	is returned on a failure.
 *
 *	When registered all registration and up events are replayed
 *	to the new notifier to allow device to have a race free
 *	view of the network device list.
 */

int register_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net_device *last;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_register(&netdev_chain, nb);
	if (err)
		goto unlock;
	if (dev_boot_phase)
		goto unlock;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
			err = notifier_to_errno(err);
			if (err)
				goto rollback;

			if (!(dev->flags & IFF_UP))
				continue;

			call_netdevice_notifier(nb, NETDEV_UP, dev);
		}
	}

unlock:
	rtnl_unlock();
	return err;

rollback:
	last = dev;
	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev == last)
				goto outroll;

			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}

outroll:
	raw_notifier_chain_unregister(&netdev_chain, nb);
	goto unlock;
}
EXPORT_SYMBOL(register_netdevice_notifier);
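
/* Example (illustrative): a subsystem that wants to react to device
 * registration might do the following (the my_* names are made up for
 * this sketch):
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		if (event == NETDEV_REGISTER)
 *			pr_info("%s registered\n", dev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 */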

/**
 *	unregister_netdevice_notifier - unregister a network notifier block
 *	@nb: notifier
 *
 *	Unregister a notifier previously registered by
 *	register_netdevice_notifier(). The notifier is unlinked from the
 *	kernel structures and may then be reused. A negative errno code
 *	is returned on a failure.
 *
 *	After unregistering unregister and down device events are synthesized
 *	for all devices on the device list to the removed notifier to remove
 *	the need for special case cleanup code.
 */

int unregister_netdevice_notifier(struct notifier_block *nb)
{
	struct net_device *dev;
	struct net *net;
	int err;

	rtnl_lock();
	err = raw_notifier_chain_unregister(&netdev_chain, nb);
	if (err)
		goto unlock;

	for_each_net(net) {
		for_each_netdev(net, dev) {
			if (dev->flags & IFF_UP) {
				call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
							dev);
				call_netdevice_notifier(nb, NETDEV_DOWN, dev);
			}
			call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
		}
	}
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_netdevice_notifier);

/**
 *	call_netdevice_notifiers_info - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *	@info: notifier information data
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

static int call_netdevice_notifiers_info(unsigned long val,
					 struct net_device *dev,
					 struct netdev_notifier_info *info)
{
	ASSERT_RTNL();
	netdev_notifier_info_init(info, dev);
	return raw_notifier_call_chain(&netdev_chain, val, info);
}

/**
 *	call_netdevice_notifiers - call all network notifier blocks
 *	@val: value passed unmodified to notifier function
 *	@dev: net_device pointer passed unmodified to notifier function
 *
 *	Call all network notifier blocks.  Parameters and return value
 *	are as for raw_notifier_call_chain().
 */

int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
{
	struct netdev_notifier_info info;

	return call_netdevice_notifiers_info(val, dev, &info);
}
EXPORT_SYMBOL(call_netdevice_notifiers);

static struct static_key netstamp_needed __read_mostly;
#ifdef HAVE_JUMP_LABEL
/* We are not allowed to call static_key_slow_dec() from irq context
 * If net_disable_timestamp() is called from irq context, defer the
 * static_key_slow_dec() calls.
 */
static atomic_t netstamp_needed_deferred;
#endif

void net_enable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);

	if (deferred) {
		while (--deferred)
			static_key_slow_dec(&netstamp_needed);
		return;
	}
#endif
	static_key_slow_inc(&netstamp_needed);
}
EXPORT_SYMBOL(net_enable_timestamp);

void net_disable_timestamp(void)
{
#ifdef HAVE_JUMP_LABEL
	if (in_interrupt()) {
		atomic_inc(&netstamp_needed_deferred);
		return;
	}
#endif
	static_key_slow_dec(&netstamp_needed);
}
EXPORT_SYMBOL(net_disable_timestamp);

static inline void net_timestamp_set(struct sk_buff *skb)
{
	skb->tstamp.tv64 = 0;
	if (static_key_false(&netstamp_needed))
		__net_timestamp(skb);
}

#define net_timestamp_check(COND, SKB)			\
	if (static_key_false(&netstamp_needed)) {	\
		if ((COND) && !(SKB)->tstamp.tv64)	\
			__net_timestamp(SKB);		\
	}						\

bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int len;

	if (!(dev->flags & IFF_UP))
		return false;

	len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
	if (skb->len <= len)
		return true;

	/* if TSO is enabled, we don't care about the length as the packet
	 * could be forwarded without being segmented before
	 */
	if (skb_is_gso(skb))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(is_skb_forwardable);

int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
			atomic_long_inc(&dev->rx_dropped);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	if (unlikely(!is_skb_forwardable(dev, skb))) {
		atomic_long_inc(&dev->rx_dropped);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	skb_scrub_packet(skb, true);
	skb->protocol = eth_type_trans(skb, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(__dev_forward_skb);

44540960
AB
1687/**
1688 * dev_forward_skb - loopback an skb to another netif
1689 *
1690 * @dev: destination network device
1691 * @skb: buffer to forward
1692 *
1693 * return values:
1694 * NET_RX_SUCCESS (no congestion)
6ec82562 1695 * NET_RX_DROP (packet was dropped, but freed)
44540960
AB
1696 *
1697 * dev_forward_skb can be used for injecting an skb from the
1698 * start_xmit function of one device into the receive queue
1699 * of another device.
1700 *
1701 * The receiving device may be in another namespace, so
1702 * we have to clear all information in the skb that could
1703 * impact namespace isolation.
1704 */
1705int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1706{
a0265d28 1707 return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
44540960
AB
1708}
1709EXPORT_SYMBOL_GPL(dev_forward_skb);
1710
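/*
 * Illustrative sketch (not part of this file): a minimal ndo_start_xmit
 * for a veth-style pair that injects each transmitted skb into its
 * peer's receive path via dev_forward_skb().  "example_priv" and its
 * "peer" field are hypothetical names used only for this example.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct example_priv *priv = netdev_priv(dev);
 *		struct net_device *peer = priv->peer;
 *
 *		if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
 *			dev->stats.tx_packets++;
 *		else
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;	// skb is consumed either way
 *	}
 */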
static inline int deliver_skb(struct sk_buff *skb,
			      struct packet_type *pt_prev,
			      struct net_device *orig_dev)
{
	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
		return -ENOMEM;
	atomic_inc(&skb->users);
	return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}

static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
{
	if (!ptype->af_packet_priv || !skb->sk)
		return false;

	if (ptype->id_match)
		return ptype->id_match(ptype, skb->sk);
	else if ((struct sock *)ptype->af_packet_priv == skb->sk)
		return true;

	return false;
}

/*
 *	Support routine. Sends outgoing frames to any network
 *	taps currently in use.
 */

static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
	struct packet_type *ptype;
	struct sk_buff *skb2 = NULL;
	struct packet_type *pt_prev = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		/* Never send packets back to the socket
		 * they originated from - MvS (miquels@drinkel.ow.org)
		 */
		if ((ptype->dev == dev || !ptype->dev) &&
		    (!skb_loop_sk(ptype, skb))) {
			if (pt_prev) {
				deliver_skb(skb2, pt_prev, skb->dev);
				pt_prev = ptype;
				continue;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (!skb2)
				break;

			net_timestamp_set(skb2);

			/* skb->nh should be correctly
			 * set by sender, so that the second statement is
			 * just protection against buggy protocols.
			 */
			skb_reset_mac_header(skb2);

			if (skb_network_header(skb2) < skb2->data ||
			    skb_network_header(skb2) > skb_tail_pointer(skb2)) {
				net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
						     ntohs(skb2->protocol),
						     dev->name);
				skb_reset_network_header(skb2);
			}

			skb2->transport_header = skb2->network_header;
			skb2->pkt_type = PACKET_OUTGOING;
			pt_prev = ptype;
		}
	}
	if (pt_prev)
		pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
	rcu_read_unlock();
}

/**
 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
 * @dev: Network device
 * @txq: number of queues available
 *
 * If real_num_tx_queues is changed the tc mappings may no longer be
 * valid. To resolve this verify the tc mapping remains valid and if
 * not NULL the mapping. With no priorities mapping to this
 * offset/count pair it will no longer be used. In the worst case TC0
 * is invalid and nothing can be done, so disable priority mappings. It is
 * expected that drivers will fix this mapping if they can before
 * calling netif_set_real_num_tx_queues.
 */
static void netif_setup_tc(struct net_device *dev, unsigned int txq)
{
	int i;
	struct netdev_tc_txq *tc = &dev->tc_to_txq[0];

	/* If TC0 is invalidated disable TC mapping */
	if (tc->offset + tc->count > txq) {
		pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
		dev->num_tc = 0;
		return;
	}

	/* Invalidated prio to tc mappings set to TC0 */
	for (i = 1; i < TC_BITMASK + 1; i++) {
		int q = netdev_get_prio_tc_map(dev, i);

		tc = &dev->tc_to_txq[q];
		if (tc->offset + tc->count > txq) {
			pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
				i, q);
			netdev_set_prio_tc_map(dev, i, 0);
		}
	}
}

#ifdef CONFIG_XPS
static DEFINE_MUTEX(xps_map_mutex);
#define xmap_dereference(P)		\
	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))

static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
					int cpu, u16 index)
{
	struct xps_map *map = NULL;
	int pos;

	if (dev_maps)
		map = xmap_dereference(dev_maps->cpu_map[cpu]);

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] == index) {
			if (map->len > 1) {
				map->queues[pos] = map->queues[--map->len];
			} else {
				RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
				kfree_rcu(map, rcu);
				map = NULL;
			}
			break;
		}
	}

	return map;
}

static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
{
	struct xps_dev_maps *dev_maps;
	int cpu, i;
	bool active = false;

	mutex_lock(&xps_map_mutex);
	dev_maps = xmap_dereference(dev->xps_maps);

	if (!dev_maps)
		goto out_no_maps;

	for_each_possible_cpu(cpu) {
		for (i = index; i < dev->num_tx_queues; i++) {
			if (!remove_xps_queue(dev_maps, cpu, i))
				break;
		}
		if (i == dev->num_tx_queues)
			active = true;
	}

	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

	for (i = index; i < dev->num_tx_queues; i++)
		netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
					     NUMA_NO_NODE);

out_no_maps:
	mutex_unlock(&xps_map_mutex);
}

static struct xps_map *expand_xps_map(struct xps_map *map,
				      int cpu, u16 index)
{
	struct xps_map *new_map;
	int alloc_len = XPS_MIN_MAP_ALLOC;
	int i, pos;

	for (pos = 0; map && pos < map->len; pos++) {
		if (map->queues[pos] != index)
			continue;
		return map;
	}

	/* Need to add queue to this CPU's existing map */
	if (map) {
		if (pos < map->alloc_len)
			return map;

		alloc_len = map->alloc_len * 2;
	}

	/* Need to allocate new map to store queue on this CPU's map */
	new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
			       cpu_to_node(cpu));
	if (!new_map)
		return NULL;

	for (i = 0; i < pos; i++)
		new_map->queues[i] = map->queues[i];
	new_map->alloc_len = alloc_len;
	new_map->len = pos;

	return new_map;
}

int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
			u16 index)
{
	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
	struct xps_map *map, *new_map;
	int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
	int cpu, numa_node_id = -2;
	bool active = false;

	mutex_lock(&xps_map_mutex);

	dev_maps = xmap_dereference(dev->xps_maps);

	/* allocate memory for queue storage */
	for_each_online_cpu(cpu) {
		if (!cpumask_test_cpu(cpu, mask))
			continue;

		if (!new_dev_maps)
			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
		if (!new_dev_maps) {
			mutex_unlock(&xps_map_mutex);
			return -ENOMEM;
		}

		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;

		map = expand_xps_map(map, cpu, index);
		if (!map)
			goto error;

		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
	}

	if (!new_dev_maps)
		goto out_no_new_maps;

	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
			/* add queue to CPU maps */
			int pos = 0;

			map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			while ((pos < map->len) && (map->queues[pos] != index))
				pos++;

			if (pos == map->len)
				map->queues[map->len++] = index;
#ifdef CONFIG_NUMA
			if (numa_node_id == -2)
				numa_node_id = cpu_to_node(cpu);
			else if (numa_node_id != cpu_to_node(cpu))
				numa_node_id = -1;
#endif
		} else if (dev_maps) {
			/* fill in the new device map from the old device map */
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
		}

	}

	rcu_assign_pointer(dev->xps_maps, new_dev_maps);

	/* Cleanup old maps */
	if (dev_maps) {
		for_each_possible_cpu(cpu) {
			new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
			map = xmap_dereference(dev_maps->cpu_map[cpu]);
			if (map && map != new_map)
				kfree_rcu(map, rcu);
		}

		kfree_rcu(dev_maps, rcu);
	}

	dev_maps = new_dev_maps;
	active = true;

out_no_new_maps:
	/* update Tx queue numa node */
	netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
				     (numa_node_id >= 0) ? numa_node_id :
				     NUMA_NO_NODE);

	if (!dev_maps)
		goto out_no_maps;

	/* removes queue from unused CPUs */
	for_each_possible_cpu(cpu) {
		if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
			continue;

		if (remove_xps_queue(dev_maps, cpu, index))
			active = true;
	}

	/* free map if not active */
	if (!active) {
		RCU_INIT_POINTER(dev->xps_maps, NULL);
		kfree_rcu(dev_maps, rcu);
	}

out_no_maps:
	mutex_unlock(&xps_map_mutex);

	return 0;
error:
	/* remove any maps that we added */
	for_each_possible_cpu(cpu) {
		new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
		map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
				 NULL;
		if (new_map && new_map != map)
			kfree(new_map);
	}

	mutex_unlock(&xps_map_mutex);

	kfree(new_dev_maps);
	return -ENOMEM;
}
EXPORT_SYMBOL(netif_set_xps_queue);

#endif
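/*
 * Illustrative sketch (not from this file): a multiqueue driver might
 * pin each TX queue to the CPU servicing its completion interrupt.
 * "example_setup_xps" and the one-queue-per-CPU layout are assumptions
 * made only for this example.
 *
 *	static void example_setup_xps(struct net_device *dev)
 *	{
 *		int cpu;
 *
 *		for_each_online_cpu(cpu) {
 *			if (cpu >= dev->real_num_tx_queues)
 *				break;
 *			// map TX queue 'cpu' to exactly that CPU
 *			netif_set_xps_queue(dev, cpumask_of(cpu), cpu);
 *		}
 *	}
 */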
/*
 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
 */
int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	int rc;

	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED ||
	    dev->reg_state == NETREG_UNREGISTERING) {
		ASSERT_RTNL();

		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
						  txq);
		if (rc)
			return rc;

		if (dev->num_tc)
			netif_setup_tc(dev, txq);

		if (txq < dev->real_num_tx_queues) {
			qdisc_reset_all_tx_gt(dev, txq);
#ifdef CONFIG_XPS
			netif_reset_xps_queues_gt(dev, txq);
#endif
		}
	}

	dev->real_num_tx_queues = txq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_tx_queues);
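/*
 * Illustrative sketch (not from this file): a driver reacting to an
 * ethtool channel-count change might resize its queues like this.
 * "example_set_channels" and "count" are hypothetical; the RTNL lock
 * is already held in the ethtool path, satisfying ASSERT_RTNL() above.
 *
 *	static int example_set_channels(struct net_device *dev,
 *					unsigned int count)
 *	{
 *		int err;
 *
 *		err = netif_set_real_num_tx_queues(dev, count);
 *		if (err)
 *			return err;
 *		return netif_set_real_num_rx_queues(dev, count);
 *	}
 */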

#ifdef CONFIG_SYSFS
/**
 *	netif_set_real_num_rx_queues - set actual number of RX queues used
 *	@dev: Network device
 *	@rxq: Actual number of RX queues
 *
 *	This must be called either with the rtnl_lock held or before
 *	registration of the net device.  Returns 0 on success, or a
 *	negative error code.  If called before registration, it always
 *	succeeds.
 */
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
{
	int rc;

	if (rxq < 1 || rxq > dev->num_rx_queues)
		return -EINVAL;

	if (dev->reg_state == NETREG_REGISTERED) {
		ASSERT_RTNL();

		rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
						  rxq);
		if (rc)
			return rc;
	}

	dev->real_num_rx_queues = rxq;
	return 0;
}
EXPORT_SYMBOL(netif_set_real_num_rx_queues);
#endif

/**
 * netif_get_num_default_rss_queues - default number of RSS queues
 *
 * This routine should set an upper limit on the number of RSS queues
 * used by default by multiqueue devices.
 */
int netif_get_num_default_rss_queues(void)
{
	return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
}
EXPORT_SYMBOL(netif_get_num_default_rss_queues);

static inline void __netif_reschedule(struct Qdisc *q)
{
	struct softnet_data *sd;
	unsigned long flags;

	local_irq_save(flags);
	sd = &__get_cpu_var(softnet_data);
	q->next_sched = NULL;
	*sd->output_queue_tailp = q;
	sd->output_queue_tailp = &q->next_sched;
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}

void __netif_schedule(struct Qdisc *q)
{
	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
		__netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);

struct dev_kfree_skb_cb {
	enum skb_free_reason reason;
};

static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
{
	return (struct dev_kfree_skb_cb *)skb->cb;
}

void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
{
	unsigned long flags;

	if (likely(atomic_read(&skb->users) == 1)) {
		smp_rmb();
		atomic_set(&skb->users, 0);
	} else if (likely(!atomic_dec_and_test(&skb->users))) {
		return;
	}
	get_kfree_skb_cb(skb)->reason = reason;
	local_irq_save(flags);
	skb->next = __this_cpu_read(softnet_data.completion_queue);
	__this_cpu_write(softnet_data.completion_queue, skb);
	raise_softirq_irqoff(NET_TX_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);

void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
{
	if (in_irq() || irqs_disabled())
		__dev_kfree_skb_irq(skb, reason);
	else
		dev_kfree_skb(skb);
}
EXPORT_SYMBOL(__dev_kfree_skb_any);
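/*
 * Illustrative sketch (not from this file): a TX completion handler,
 * which may run in hard-irq context, frees transmitted skbs with the
 * context-safe variant above.  "example_ring" and its accessor are
 * hypothetical.
 *
 *	static void example_tx_complete(struct example_ring *ring)
 *	{
 *		struct sk_buff *skb;
 *
 *		while ((skb = example_ring_next_done(ring)) != NULL)
 *			__dev_kfree_skb_any(skb, SKB_REASON_CONSUMED);
 *	}
 */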


/**
 * netif_device_detach - mark device as removed
 * @dev: network device
 *
 * Mark device as removed from system and therefore no longer available.
 */
void netif_device_detach(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_stop_all_queues(dev);
	}
}
EXPORT_SYMBOL(netif_device_detach);

/**
 * netif_device_attach - mark device as attached
 * @dev: network device
 *
 * Mark device as attached to system and restart if needed.
 */
void netif_device_attach(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
	    netif_running(dev)) {
		netif_tx_wake_all_queues(dev);
		__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_device_attach);

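/*
 * Illustrative sketch (not from this file): suspend/resume hooks
 * typically bracket the power transition with detach/attach so the
 * stack stops handing the device packets while it is powered down.
 * "example_suspend"/"example_resume" are hypothetical names.
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */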
static void skb_warn_bad_offload(const struct sk_buff *skb)
{
	static const netdev_features_t null_features = 0;
	struct net_device *dev = skb->dev;
	const char *driver = "";

	if (!net_ratelimit())
		return;

	if (dev && dev->dev.parent)
		driver = dev_driver_string(dev->dev.parent);

	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
	     "gso_type=%d ip_summed=%d\n",
	     driver, dev ? &dev->features : &null_features,
	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
	     skb_shinfo(skb)->gso_type, skb->ip_summed);
}

/*
 * Invalidate hardware checksum when packet is to be mangled, and
 * complete checksum manually on outgoing path.
 */
int skb_checksum_help(struct sk_buff *skb)
{
	__wsum csum;
	int ret = 0, offset;

	if (skb->ip_summed == CHECKSUM_COMPLETE)
		goto out_set_summed;

	if (unlikely(skb_shinfo(skb)->gso_size)) {
		skb_warn_bad_offload(skb);
		return -EINVAL;
	}

	/* Before computing a checksum, we should make sure no frag could
	 * be modified by an external entity : checksum could be wrong.
	 */
	if (skb_has_shared_frag(skb)) {
		ret = __skb_linearize(skb);
		if (ret)
			goto out;
	}

	offset = skb_checksum_start_offset(skb);
	BUG_ON(offset >= skb_headlen(skb));
	csum = skb_checksum(skb, offset, skb->len - offset, 0);

	offset += skb->csum_offset;
	BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));

	if (skb_cloned(skb) &&
	    !skb_clone_writable(skb, offset + sizeof(__sum16))) {
		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		if (ret)
			goto out;
	}

	*(__sum16 *)(skb->data + offset) = csum_fold(csum);
out_set_summed:
	skb->ip_summed = CHECKSUM_NONE;
out:
	return ret;
}
EXPORT_SYMBOL(skb_checksum_help);

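/*
 * Illustrative sketch (not from this file): a tunnel or mangling path
 * that must hand a CHECKSUM_PARTIAL skb to a device without the needed
 * offload can resolve the checksum in software first.  The feature
 * test shown is a simplification made for this example.
 *
 *	static int example_finish_csum(struct sk_buff *skb,
 *				       struct net_device *dev)
 *	{
 *		if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *		    !(dev->features & NETIF_F_ALL_CSUM))
 *			return skb_checksum_help(skb);
 *		return 0;
 *	}
 */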
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
	unsigned int vlan_depth = skb->mac_len;
	__be16 type = skb->protocol;

	/* Tunnel gso handlers can set protocol to ethernet. */
	if (type == htons(ETH_P_TEB)) {
		struct ethhdr *eth;

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
			return 0;

		eth = (struct ethhdr *)skb_mac_header(skb);
		type = eth->h_proto;
	}

	/* if skb->protocol is 802.1Q/AD then the header should already be
	 * present at mac_len - VLAN_HLEN (if mac_len > 0), or at
	 * ETH_HLEN otherwise
	 */
	if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
		if (vlan_depth) {
			if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
				return 0;
			vlan_depth -= VLAN_HLEN;
		} else {
			vlan_depth = ETH_HLEN;
		}
		do {
			struct vlan_hdr *vh;

			if (unlikely(!pskb_may_pull(skb,
						    vlan_depth + VLAN_HLEN)))
				return 0;

			vh = (struct vlan_hdr *)(skb->data + vlan_depth);
			type = vh->h_vlan_encapsulated_proto;
			vlan_depth += VLAN_HLEN;
		} while (type == htons(ETH_P_8021Q) ||
			 type == htons(ETH_P_8021AD));
	}

	*depth = vlan_depth;

	return type;
}

/**
 *	skb_mac_gso_segment - mac layer segmentation handler.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
				int err;

				err = ptype->callbacks.gso_send_check(skb);
				segs = ERR_PTR(err);
				if (err || skb_gso_ok(skb, features))
					break;
				__skb_push(skb, (skb->data -
						 skb_network_header(skb)));
			}
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);


/* openvswitch calls this on rx path, so we need a different check.
 */
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
	if (tx_path)
		return skb->ip_summed != CHECKSUM_PARTIAL;
	else
		return skb->ip_summed == CHECKSUM_NONE;
}

/**
 *	__skb_gso_segment - Perform segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *	@tx_path: whether it is called in TX path
 *
 *	This function segments the given skb and returns a list of segments.
 *
 *	It may return NULL if the skb requires no segmentation.  This is
 *	only possible when GSO is used for verifying header integrity.
 */
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
				  netdev_features_t features, bool tx_path)
{
	if (unlikely(skb_needs_check(skb, tx_path))) {
		int err;

		skb_warn_bad_offload(skb);

		if (skb_header_cloned(skb) &&
		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			return ERR_PTR(err);
	}

	SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
	SKB_GSO_CB(skb)->encap_level = 0;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return skb_mac_gso_segment(skb, features);
}
EXPORT_SYMBOL(__skb_gso_segment);

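/*
 * Illustrative sketch (not from this file): walking the segment list
 * returned by skb_gso_segment() and transmitting each piece, in the
 * style of the dev_gso path below.  Error handling is trimmed for
 * brevity; "example_xmit_one" is hypothetical.
 *
 *	static int example_xmit_gso(struct sk_buff *skb,
 *				    netdev_features_t features)
 *	{
 *		struct sk_buff *segs, *nskb;
 *
 *		segs = skb_gso_segment(skb, features);
 *		if (IS_ERR(segs))
 *			return PTR_ERR(segs);
 *		if (!segs)
 *			return example_xmit_one(skb);	// no segmentation needed
 *
 *		consume_skb(skb);	// segments carry the data now
 *		while (segs) {
 *			nskb = segs;
 *			segs = segs->next;
 *			nskb->next = NULL;
 *			example_xmit_one(nskb);
 *		}
 *		return 0;
 *	}
 */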
/* Take action when hardware reception checksum errors are detected. */
#ifdef CONFIG_BUG
void netdev_rx_csum_fault(struct net_device *dev)
{
	if (net_ratelimit()) {
		pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
		dump_stack();
	}
}
EXPORT_SYMBOL(netdev_rx_csum_fault);
#endif

/* Actually, we should eliminate this check as soon as we know, that:
 * 1. IOMMU is present and allows to map all the memory.
 * 2. No high memory really exists on this machine.
 */

static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
	int i;
	if (!(dev->features & NETIF_F_HIGHDMA)) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			if (PageHighMem(skb_frag_page(frag)))
				return 1;
		}
	}

	if (PCI_DMA_BUS_IS_PHYS) {
		struct device *pdev = dev->dev.parent;

		if (!pdev)
			return 0;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			dma_addr_t addr = page_to_phys(skb_frag_page(frag));
			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
				return 1;
		}
	}
#endif
	return 0;
}

struct dev_gso_cb {
	void (*destructor)(struct sk_buff *skb);
};

#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)

static void dev_gso_skb_destructor(struct sk_buff *skb)
{
	struct dev_gso_cb *cb;

	kfree_skb_list(skb->next);
	skb->next = NULL;

	cb = DEV_GSO_CB(skb);
	if (cb->destructor)
		cb->destructor(skb);
}

/**
 *	dev_gso_segment - Perform emulated hardware segmentation on skb.
 *	@skb: buffer to segment
 *	@features: device features as applicable to this skb
 *
 *	This function segments the given skb and stores the list of segments
 *	in skb->next.
 */
static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
	struct sk_buff *segs;

	segs = skb_gso_segment(skb, features);

	/* Verifying header integrity only. */
	if (!segs)
		return 0;

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	skb->next = segs;
	DEV_GSO_CB(skb)->destructor = skb->destructor;
	skb->destructor = dev_gso_skb_destructor;

	return 0;
}

/* If MPLS offload request, verify we are testing hardware MPLS features
 * instead of standard features for the netdev.
 */
#ifdef CONFIG_NET_MPLS_GSO
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	if (type == htons(ETH_P_MPLS_UC) || type == htons(ETH_P_MPLS_MC))
		features &= skb->dev->mpls_features;

	return features;
}
#else
static netdev_features_t net_mpls_features(struct sk_buff *skb,
					   netdev_features_t features,
					   __be16 type)
{
	return features;
}
#endif

static netdev_features_t harmonize_features(struct sk_buff *skb,
					    netdev_features_t features)
{
	int tmp;
	__be16 type;

	type = skb_network_protocol(skb, &tmp);
	features = net_mpls_features(skb, features, type);

	if (skb->ip_summed != CHECKSUM_NONE &&
	    !can_checksum_protocol(features, type)) {
		features &= ~NETIF_F_ALL_CSUM;
	} else if (illegal_highdma(skb->dev, skb)) {
		features &= ~NETIF_F_SG;
	}

	return features;
}

netdev_features_t netif_skb_features(struct sk_buff *skb)
{
	__be16 protocol = skb->protocol;
	netdev_features_t features = skb->dev->features;

	if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
		features &= ~NETIF_F_GSO_MASK;

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
		protocol = veh->h_vlan_encapsulated_proto;
	} else if (!vlan_tx_tag_present(skb)) {
		return harmonize_features(skb, features);
	}

	features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
				NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_STAG_TX;

	return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);

int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
			struct netdev_queue *txq)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	int rc = NETDEV_TX_OK;
	unsigned int skb_len;

	if (likely(!skb->next)) {
		netdev_features_t features;

		/*
		 * If device doesn't need skb->dst, release it right now while
		 * it's hot in this cpu cache
		 */
		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
			skb_dst_drop(skb);

		features = netif_skb_features(skb);

		if (vlan_tx_tag_present(skb) &&
		    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
			skb = __vlan_put_tag(skb, skb->vlan_proto,
					     vlan_tx_tag_get(skb));
			if (unlikely(!skb))
				goto out;

			skb->vlan_tci = 0;
		}

		/* If encapsulation offload request, verify we are testing
		 * hardware encapsulation features instead of standard
		 * features for the netdev
		 */
		if (skb->encapsulation)
			features &= dev->hw_enc_features;

		if (netif_needs_gso(skb, features)) {
			if (unlikely(dev_gso_segment(skb, features)))
				goto out_kfree_skb;
			if (skb->next)
				goto gso;
		} else {
			if (skb_needs_linearize(skb, features) &&
			    __skb_linearize(skb))
				goto out_kfree_skb;

			/* If packet is not checksummed and device does not
			 * support checksumming for this protocol, complete
			 * checksumming here.
			 */
			if (skb->ip_summed == CHECKSUM_PARTIAL) {
				if (skb->encapsulation)
					skb_set_inner_transport_header(skb,
						skb_checksum_start_offset(skb));
				else
					skb_set_transport_header(skb,
						skb_checksum_start_offset(skb));
				if (!(features & NETIF_F_ALL_CSUM) &&
				     skb_checksum_help(skb))
					goto out_kfree_skb;
			}
		}

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(skb, dev);

		skb_len = skb->len;
		trace_net_dev_start_xmit(skb, dev);
		rc = ops->ndo_start_xmit(skb, dev);
		trace_net_dev_xmit(skb, rc, dev, skb_len);
		if (rc == NETDEV_TX_OK)
			txq_trans_update(txq);
		return rc;
	}

gso:
	do {
		struct sk_buff *nskb = skb->next;

		skb->next = nskb->next;
		nskb->next = NULL;

		if (!list_empty(&ptype_all))
			dev_queue_xmit_nit(nskb, dev);

		skb_len = nskb->len;
		trace_net_dev_start_xmit(nskb, dev);
		rc = ops->ndo_start_xmit(nskb, dev);
		trace_net_dev_xmit(nskb, rc, dev, skb_len);
		if (unlikely(rc != NETDEV_TX_OK)) {
			if (rc & ~NETDEV_TX_MASK)
				goto out_kfree_gso_skb;
			nskb->next = skb->next;
			skb->next = nskb;
			return rc;
		}
		txq_trans_update(txq);
		if (unlikely(netif_xmit_stopped(txq) && skb->next))
			return NETDEV_TX_BUSY;
	} while (skb->next);

out_kfree_gso_skb:
	if (likely(skb->next == NULL)) {
		skb->destructor = DEV_GSO_CB(skb)->destructor;
		consume_skb(skb);
		return rc;
	}
out_kfree_skb:
	kfree_skb(skb);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(dev_hard_start_xmit);

static void qdisc_pkt_len_init(struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	qdisc_skb_cb(skb)->pkt_len = skb->len;

	/* To get more precise estimation of bytes sent on wire,
	 * we add to pkt_len the headers size of all segments
	 */
	if (shinfo->gso_size) {
		unsigned int hdr_len;
		u16 gso_segs = shinfo->gso_segs;

		/* mac layer + network layer */
		hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

		/* + transport layer */
		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			hdr_len += tcp_hdrlen(skb);
		else
			hdr_len += sizeof(struct udphdr);

		if (shinfo->gso_type & SKB_GSO_DODGY)
			gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
						shinfo->gso_size);

		qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
	}
}

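/*
 * Worked example (illustrative, not from this file): a TSO skb with
 * skb->len = 4394 bytes, gso_size = 1448 and hdr_len = 66
 * (14 MAC + 20 IP + 32 TCP) carries gso_segs = 3 segments.  On the
 * wire each segment repeats the 66-byte headers, so the estimate is
 *
 *	pkt_len = 4394 + (3 - 1) * 66 = 4526 bytes
 *
 * which matches what the three segments (1448+66, 1448+66, 1432+66)
 * actually consume.
 */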
static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
				 struct net_device *dev,
				 struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	bool contended;
	int rc;

	qdisc_pkt_len_init(skb);
	qdisc_calculate_pkt_len(skb, q);
	/*
	 * Heuristic to force contended enqueues to serialize on a
	 * separate lock before trying to get qdisc main lock.
	 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
	 * and dequeue packets faster.
	 */
	contended = qdisc_is_running(q);
	if (unlikely(contended))
		spin_lock(&q->busylock);

	spin_lock(root_lock);
	if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
		kfree_skb(skb);
		rc = NET_XMIT_DROP;
	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		   qdisc_run_begin(q)) {
		/*
		 * This is a work-conserving queue; there are no old skbs
		 * waiting to be sent out; and the qdisc is not running -
		 * xmit the skb directly.
		 */
		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
			skb_dst_force(skb);

		qdisc_bstats_update(q, skb);

		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		} else
			qdisc_run_end(q);

		rc = NET_XMIT_SUCCESS;
	} else {
		skb_dst_force(skb);
		rc = q->enqueue(skb, q) & NET_XMIT_MASK;
		if (qdisc_run_begin(q)) {
			if (unlikely(contended)) {
				spin_unlock(&q->busylock);
				contended = false;
			}
			__qdisc_run(q);
		}
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);
	return rc;
}

#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
static void skb_update_prio(struct sk_buff *skb)
{
	struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);

	if (!skb->priority && skb->sk && map) {
		unsigned int prioidx = skb->sk->sk_cgrp_prioidx;

		if (prioidx < map->priomap_len)
			skb->priority = map->priomap[prioidx];
	}
}
#else
#define skb_update_prio(skb)
#endif

static DEFINE_PER_CPU(int, xmit_recursion);
#define RECURSION_LIMIT 10

/**
 *	dev_loopback_xmit - loop back @skb
 *	@skb: buffer to transmit
 */
int dev_loopback_xmit(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);
	__skb_pull(skb, skb_network_offset(skb));
	skb->pkt_type = PACKET_LOOPBACK;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(skb));
	skb_dst_force(skb);
	netif_rx_ni(skb);
	return 0;
}
EXPORT_SYMBOL(dev_loopback_xmit);

/**
 *	__dev_queue_xmit - transmit a buffer
 *	@skb: buffer to transmit
 *	@accel_priv: private data used for L2 forwarding offload
 *
 *	Queue a buffer for transmission to a network device. The caller must
 *	have set the device and priority and built the buffer before calling
 *	this function. The function can be called from an interrupt.
 *
 *	A negative errno code is returned on a failure. A success does not
 *	guarantee the frame will be transmitted as it may be dropped due
 *	to congestion or traffic shaping.
 *
 * -----------------------------------------------------------------------------------
 *      I notice this method can also return errors from the queue disciplines,
 *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
 *      be positive.
 *
 *      Regardless of the return value, the skb is consumed, so it is currently
 *      difficult to retry a send to this method.  (You can bump the ref count
 *      before sending to hold a reference for retry if you are careful.)
 *
 *      When calling this method, interrupts MUST be enabled.  This is because
 *      the BH enable code must have IRQs enabled so that it will not deadlock.
 *          --BLG
 */
static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
	struct net_device *dev = skb->dev;
	struct netdev_queue *txq;
	struct Qdisc *q;
	int rc = -ENOMEM;

	skb_reset_mac_header(skb);

	/* Disable soft irqs for various locks below. Also
	 * stops preemption for RCU.
	 */
	rcu_read_lock_bh();

	skb_update_prio(skb);

	txq = netdev_pick_tx(dev, skb, accel_priv);
	q = rcu_dereference_bh(txq->qdisc);

#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
	trace_net_dev_queue(skb);
	if (q->enqueue) {
		rc = __dev_xmit_skb(skb, q, dev, txq);
		goto out;
	}

	/* The device has no queue. Common case for software devices:
	 * loopback, all the sorts of tunnels...
	 *
	 * Really, it is unlikely that netif_tx_lock protection is necessary
	 * here.  (f.e. loopback and IP tunnels are clean ignoring statistics
	 * counters.)
	 * However, it is possible, that they rely on protection
	 * made by us here.
	 *
	 * Check this and shot the lock. It is not prone from deadlocks.
	 * Either shot noqueue qdisc, it is even simpler 8)
	 */
	if (dev->flags & IFF_UP) {
		int cpu = smp_processor_id(); /* ok because BHs are off */

		if (txq->xmit_lock_owner != cpu) {

			if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
				goto recursion_alert;

			HARD_TX_LOCK(dev, txq, cpu);

			if (!netif_xmit_stopped(txq)) {
				__this_cpu_inc(xmit_recursion);
				rc = dev_hard_start_xmit(skb, dev, txq);
				__this_cpu_dec(xmit_recursion);
				if (dev_xmit_complete(rc)) {
					HARD_TX_UNLOCK(dev, txq);
					goto out;
				}
			}
			HARD_TX_UNLOCK(dev, txq);
			net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
					     dev->name);
		} else {
			/* Recursion is detected! It is possible,
			 * unfortunately
			 */
recursion_alert:
			net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
					     dev->name);
		}
	}

	rc = -ENETDOWN;
	rcu_read_unlock_bh();

	atomic_long_inc(&dev->tx_dropped);
	kfree_skb(skb);
	return rc;
out:
	rcu_read_unlock_bh();
	return rc;
}

int dev_queue_xmit(struct sk_buff *skb)
{
	return __dev_queue_xmit(skb, NULL);
}
EXPORT_SYMBOL(dev_queue_xmit);

int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
{
	return __dev_queue_xmit(skb, accel_priv);
}
EXPORT_SYMBOL(dev_queue_xmit_accel);

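/*
 * Illustrative sketch (not from this file): a minimal sender that
 * builds a raw Ethernet frame and hands it to dev_queue_xmit().  The
 * payload size and the experimental EtherType are placeholders chosen
 * only for this example.
 *
 *	static int example_send_frame(struct net_device *dev,
 *				      const u8 *dst_mac, const u8 *src_mac)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = alloc_skb(ETH_HLEN + 64 + LL_RESERVED_SPACE(dev),
 *				GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;
 *		skb_reserve(skb, LL_RESERVED_SPACE(dev));
 *		skb_reset_network_header(skb);
 *		memset(skb_put(skb, 64), 0, 64);	// dummy payload
 *		skb->dev = dev;
 *		skb->protocol = htons(ETH_P_802_EX1);
 *		dev_hard_header(skb, dev, ETH_P_802_EX1, dst_mac, src_mac,
 *				skb->len);
 *		return dev_queue_xmit(skb);	// consumes the skb
 *	}
 */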

/*=======================================================================
			Receiver routines
  =======================================================================*/

int netdev_max_backlog __read_mostly = 1000;
EXPORT_SYMBOL(netdev_max_backlog);

int netdev_tstamp_prequeue __read_mostly = 1;
int netdev_budget __read_mostly = 300;
int weight_p __read_mostly = 64;            /* old backlog weight */

/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

#ifdef CONFIG_RPS

/* One global table that all flow-based protocols share. */
struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
EXPORT_SYMBOL(rps_sock_flow_table);

struct static_key rps_needed __read_mostly;

static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
	    struct rps_dev_flow *rflow, u16 next_cpu)
{
	if (next_cpu != RPS_NO_CPU) {
#ifdef CONFIG_RFS_ACCEL
		struct netdev_rx_queue *rxqueue;
		struct rps_dev_flow_table *flow_table;
		struct rps_dev_flow *old_rflow;
		u32 flow_id;
		u16 rxq_index;
		int rc;

		/* Should we steer this flow to a different hardware queue? */
		if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
		    !(dev->features & NETIF_F_NTUPLE))
			goto out;
		rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
		if (rxq_index == skb_get_rx_queue(skb))
			goto out;

		rxqueue = dev->_rx + rxq_index;
		flow_table = rcu_dereference(rxqueue->rps_flow_table);
		if (!flow_table)
			goto out;
		flow_id = skb_get_hash(skb) & flow_table->mask;
		rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
							rxq_index, flow_id);
		if (rc < 0)
			goto out;
		old_rflow = rflow;
		rflow = &flow_table->flows[flow_id];
		rflow->filter = rc;
		if (old_rflow->filter == rflow->filter)
			old_rflow->filter = RPS_NO_FILTER;
	out:
#endif
		rflow->last_qtail =
			per_cpu(softnet_data, next_cpu).input_queue_head;
	}

	rflow->cpu = next_cpu;
	return rflow;
}

/*
 * get_rps_cpu is called from netif_receive_skb and returns the target
 * CPU from the RPS map of the receiving queue for a given skb.
 * rcu_read_lock must be held on entry.
 */
static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
		       struct rps_dev_flow **rflowp)
{
	struct netdev_rx_queue *rxqueue;
	struct rps_map *map;
	struct rps_dev_flow_table *flow_table;
	struct rps_sock_flow_table *sock_flow_table;
	int cpu = -1;
	u16 tcpu;
	u32 hash;

	if (skb_rx_queue_recorded(skb)) {
		u16 index = skb_get_rx_queue(skb);
		if (unlikely(index >= dev->real_num_rx_queues)) {
			WARN_ONCE(dev->real_num_rx_queues > 1,
				  "%s received packet on queue %u, but number "
				  "of RX queues is %u\n",
				  dev->name, index, dev->real_num_rx_queues);
			goto done;
		}
		rxqueue = dev->_rx + index;
	} else
		rxqueue = dev->_rx;

	map = rcu_dereference(rxqueue->rps_map);
	if (map) {
		if (map->len == 1 &&
		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
			tcpu = map->cpus[0];
			if (cpu_online(tcpu))
				cpu = tcpu;
			goto done;
		}
	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
		goto done;
	}

	skb_reset_network_header(skb);
	hash = skb_get_hash(skb);
	if (!hash)
		goto done;

	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	if (flow_table && sock_flow_table) {
		u16 next_cpu;
		struct rps_dev_flow *rflow;

		rflow = &flow_table->flows[hash & flow_table->mask];
		tcpu = rflow->cpu;

		next_cpu = sock_flow_table->ents[hash & sock_flow_table->mask];

		/*
		 * If the desired CPU (where last recvmsg was done) is
		 * different from current CPU (one in the rx-queue flow
		 * table entry), switch if one of the following holds:
		 *   - Current CPU is unset (equal to RPS_NO_CPU).
		 *   - Current CPU is offline.
		 *   - The current CPU's queue tail has advanced beyond the
		 *     last packet that was enqueued using this table entry.
		 *     This guarantees that all previous packets for the flow
		 *     have been dequeued, thus preserving in order delivery.
		 */
		if (unlikely(tcpu != next_cpu) &&
		    (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
		     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
		      rflow->last_qtail)) >= 0)) {
			tcpu = next_cpu;
			rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
		}

		if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
			*rflowp = rflow;
			cpu = tcpu;
			goto done;
		}
	}

	if (map) {
		tcpu = map->cpus[((u64) hash * map->len) >> 32];

		if (cpu_online(tcpu)) {
			cpu = tcpu;
			goto done;
		}
	}

done:
	return cpu;
}

#ifdef CONFIG_RFS_ACCEL

/**
 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
 * @dev: Device on which the filter was set
 * @rxq_index: RX queue index
 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
 *
 * Drivers that implement ndo_rx_flow_steer() should periodically call
 * this function for each installed filter and remove the filters for
 * which it returns %true.
 */
bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
			 u32 flow_id, u16 filter_id)
{
	struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
	struct rps_dev_flow_table *flow_table;
	struct rps_dev_flow *rflow;
	bool expire = true;
	int cpu;

	rcu_read_lock();
	flow_table = rcu_dereference(rxqueue->rps_flow_table);
	if (flow_table && flow_id <= flow_table->mask) {
		rflow = &flow_table->flows[flow_id];
		cpu = ACCESS_ONCE(rflow->cpu);
		if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
		    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
			   rflow->last_qtail) <
		     (int)(10 * flow_table->mask)))
			expire = false;
	}
	rcu_read_unlock();
	return expire;
}
EXPORT_SYMBOL(rps_may_expire_flow);

#endif /* CONFIG_RFS_ACCEL */
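/*
 * Illustrative sketch (not from this file): a driver with accelerated
 * RFS might scan its installed filters from a periodic work item and
 * drop the stale ones, as the kernel-doc above suggests.
 * "example_adapter" and its filter bookkeeping are hypothetical.
 *
 *	static void example_rfs_expire(struct example_adapter *ad)
 *	{
 *		struct example_filter *f, *tmp;
 *
 *		list_for_each_entry_safe(f, tmp, &ad->rfs_filters, list) {
 *			if (rps_may_expire_flow(ad->netdev, f->rxq_index,
 *						f->flow_id, f->filter_id))
 *				example_remove_hw_filter(ad, f);
 *		}
 *	}
 */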

/* Called from hardirq (IPI) context */
static void rps_trigger_softirq(void *data)
{
	struct softnet_data *sd = data;

	____napi_schedule(sd, &sd->backlog);
	sd->received_rps++;
}

#endif /* CONFIG_RPS */

/*
 * Check if this softnet_data structure is another cpu one
 * If yes, queue it to our IPI list and return 1
 * If no, return 0
 */
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *mysd = &__get_cpu_var(softnet_data);

	if (sd != mysd) {
		sd->rps_ipi_next = mysd->rps_ipi_list;
		mysd->rps_ipi_list = sd;

		__raise_softirq_irqoff(NET_RX_SOFTIRQ);
		return 1;
	}
#endif /* CONFIG_RPS */
	return 0;
}

#ifdef CONFIG_NET_FLOW_LIMIT
int netdev_flow_limit_table_len __read_mostly = (1 << 12);
#endif

static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
{
#ifdef CONFIG_NET_FLOW_LIMIT
	struct sd_flow_limit *fl;
	struct softnet_data *sd;
	unsigned int old_flow, new_flow;

	if (qlen < (netdev_max_backlog >> 1))
		return false;

	sd = &__get_cpu_var(softnet_data);

	rcu_read_lock();
	fl = rcu_dereference(sd->flow_limit);
	if (fl) {
		new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
		old_flow = fl->history[fl->history_head];
		fl->history[fl->history_head] = new_flow;

		fl->history_head++;
		fl->history_head &= FLOW_LIMIT_HISTORY - 1;

		if (likely(fl->buckets[old_flow]))
			fl->buckets[old_flow]--;

		if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
			fl->count++;
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();
#endif
	return false;
}

/*
 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
 * queue (may be a remote CPU queue).
 */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu);

	local_irq_save(flags);

	rps_lock(sd);
	qlen = skb_queue_len(&sd->input_pkt_queue);
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
		if (skb_queue_len(&sd->input_pkt_queue)) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb);
			input_queue_tail_incr_save(sd, qtail);
			rps_unlock(sd);
			local_irq_restore(flags);
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog);
		}
		goto enqueue;
	}

	sd->dropped++;
	rps_unlock(sd);

	local_irq_restore(flags);

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb);

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);
		if (cpu < 0)
			cpu = smp_processor_id();

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}

/**
 *	netif_rx	-	post buffer to the network code
 *	@skb: buffer to post
 *
 *	This function receives a packet from a device driver and queues it for
 *	the upper (protocol) levels to process.  It always succeeds. The buffer
 *	may be dropped during processing for congestion control or by the
 *	protocol layers.
 *
 *	return values:
 *	NET_RX_SUCCESS	(no congestion)
 *	NET_RX_DROP	(packet was dropped)
 *
 */

int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}
EXPORT_SYMBOL(netif_rx);

int netif_rx_ni(struct sk_buff *skb)
{
	int err;

	trace_netif_rx_ni_entry(skb);

	preempt_disable();
	err = netif_rx_internal(skb);
	if (local_softirq_pending())
		do_softirq();
	preempt_enable();

	return err;
}
EXPORT_SYMBOL(netif_rx_ni);

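/*
 * Illustrative sketch (not from this file): a non-NAPI driver's RX
 * interrupt handler pulls a frame from its ring, attaches metadata and
 * queues it with netif_rx(); process-context callers would use
 * netif_rx_ni() instead.  "example_rx_frame" is hypothetical.
 *
 *	static void example_rx_frame(struct net_device *dev,
 *				     const void *buf, unsigned int len)
 *	{
 *		struct sk_buff *skb = netdev_alloc_skb(dev, len);
 *
 *		if (!skb) {
 *			dev->stats.rx_dropped++;
 *			return;
 *		}
 *		memcpy(skb_put(skb, len), buf, len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */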
static void net_tx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_disable();
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_enable();

		while (clist) {
			struct sk_buff *skb = clist;
			clist = clist->next;

			WARN_ON(atomic_read(&skb->users));
			if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
				trace_consume_skb(skb);
			else
				trace_kfree_skb(skb, net_tx_action);
			__kfree_skb(skb);
		}
	}

	if (sd->output_queue) {
		struct Qdisc *head;

		local_irq_disable();
		head = sd->output_queue;
		sd->output_queue = NULL;
		sd->output_queue_tailp = &sd->output_queue;
		local_irq_enable();

		while (head) {
			struct Qdisc *q = head;
			spinlock_t *root_lock;

			head = head->next_sched;

			root_lock = qdisc_lock(q);
			if (spin_trylock(root_lock)) {
				smp_mb__before_atomic();
				clear_bit(__QDISC_STATE_SCHED,
					  &q->state);
				qdisc_run(q);
				spin_unlock(root_lock);
			} else {
				if (!test_bit(__QDISC_STATE_DEACTIVATED,
					      &q->state)) {
					__netif_reschedule(q);
				} else {
					smp_mb__before_atomic();
					clear_bit(__QDISC_STATE_SCHED,
						  &q->state);
				}
			}
		}
	}
}

#if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
    (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
			     unsigned char *addr) __read_mostly;
EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif

#ifdef CONFIG_NET_CLS_ACT
/* TODO: Maybe we should just force sch_ingress to be compiled in
 * when CONFIG_NET_CLS_ACT is? Otherwise we pay some useless instructions
 * (a compare and 2 stores extra) right now if we don't have it on
 * but have CONFIG_NET_CLS_ACT
 * NOTE: This doesn't stop any functionality; if you don't have
 * the ingress scheduler, you just can't add policies on ingress.
 *
 */
static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
{
	struct net_device *dev = skb->dev;
	u32 ttl = G_TC_RTTL(skb->tc_verd);
	int result = TC_ACT_OK;
	struct Qdisc *q;

	if (unlikely(MAX_RED_LOOP < ttl++)) {
		net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
				     skb->skb_iif, dev->ifindex);
		return TC_ACT_SHOT;
	}

	skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
	skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);

	q = rxq->qdisc;
	if (q != &noop_qdisc) {
		spin_lock(qdisc_lock(q));
		if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
			result = qdisc_enqueue_root(skb, q);
		spin_unlock(qdisc_lock(q));
	}

	return result;
}

static inline struct sk_buff *handle_ing(struct sk_buff *skb,
					 struct packet_type **pt_prev,
					 int *ret, struct net_device *orig_dev)
{
	struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);

	if (!rxq || rxq->qdisc == &noop_qdisc)
		goto out;

	if (*pt_prev) {
		*ret = deliver_skb(skb, *pt_prev, orig_dev);
		*pt_prev = NULL;
	}

	switch (ing_filter(skb, rxq)) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NULL;
	}

out:
	skb->tc_verd = 0;
	return skb;
}
#endif

/**
 * netdev_rx_handler_register - register receive handler
 * @dev: device to register a handler for
 * @rx_handler: receive handler to register
 * @rx_handler_data: data pointer that is used by rx handler
 *
 * Register a receive handler for a device. This handler will then be
 * called from __netif_receive_skb. A negative errno code is returned
 * on a failure.
 *
 * The caller must hold the rtnl_mutex.
 *
 * For a general description of rx_handler, see enum rx_handler_result.
 */
int netdev_rx_handler_register(struct net_device *dev,
			       rx_handler_func_t *rx_handler,
			       void *rx_handler_data)
{
	ASSERT_RTNL();

	if (dev->rx_handler)
		return -EBUSY;

	/* Note: rx_handler_data must be set before rx_handler */
	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
	rcu_assign_pointer(dev->rx_handler, rx_handler);

	return 0;
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
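
/* Usage sketch (illustrative only; foo_handle_frame and foo_port are
 * assumed names, not kernel APIs): a module claiming a device's receive
 * path, in the style of bridge/team/bonding ports.
 *
 *	static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct foo_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		... decide to consume, steal or pass the skb here ...
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(dev, foo_handle_frame, port);
 *	rtnl_unlock();
 */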

/**
 * netdev_rx_handler_unregister - unregister receive handler
 * @dev: device to unregister a handler from
 *
 * Unregister a receive handler from a device.
 *
 * The caller must hold the rtnl_mutex.
 */
void netdev_rx_handler_unregister(struct net_device *dev)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(dev->rx_handler, NULL);
	/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
	 * section has a guarantee to see a non NULL rx_handler_data
	 * as well.
	 */
	synchronize_net();
	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);

/*
 * Limit the use of PFMEMALLOC reserves to those protocols that implement
 * the special handling of PFMEMALLOC skbs.
 */
static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
	case htons(ETH_P_8021Q):
	case htons(ETH_P_8021AD):
		return true;
	default:
		return false;
	}
}

static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev;
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev;
	struct net_device *null_or_dev;
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb);

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev;

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	pt_prev = NULL;

	rcu_read_lock();

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = vlan_untag(skb);
		if (unlikely(!skb))
			goto unlock;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (!ptype->dev || ptype->dev == skb->dev) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

skip_taps:
#ifdef CONFIG_NET_CLS_ACT
	skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
	if (!skb)
		goto unlock;
ncls:
#endif

	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (vlan_tx_tag_present(skb)) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto unlock;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler);
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) {
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto unlock;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(vlan_tx_tag_present(skb))) {
		if (vlan_tx_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	/* deliver only exact match when indicated */
	null_or_dev = deliver_exact ? skb->dev : NULL;

	type = skb->protocol;
	list_for_each_entry_rcu(ptype,
			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
		if (ptype->type == type &&
		    (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
		     ptype->dev == orig_dev)) {
			if (pt_prev)
				ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = ptype;
		}
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
	} else {
drop:
		atomic_long_inc(&skb->dev->rx_dropped);
		kfree_skb(skb);
		/* Jamal, now you will not be able to escape explaining
		 * to me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

unlock:
	rcu_read_unlock();
	return ret;
}

static int __netif_receive_skb(struct sk_buff *skb)
{
	int ret;

	if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
		unsigned long pflags = current->flags;

		/*
		 * PFMEMALLOC skbs are special, they should
		 * - be delivered to SOCK_MEMALLOC sockets only
		 * - stay away from userspace
		 * - have bounded memory usage
		 *
		 * Use PF_MEMALLOC as this saves us from propagating the
		 * allocation context down to all allocation sites.
		 */
		current->flags |= PF_MEMALLOC;
		ret = __netif_receive_skb_core(skb, true);
		tsk_restore_flags(current, pflags, PF_MEMALLOC);
	} else
		ret = __netif_receive_skb_core(skb, false);

	return ret;
}

static int netif_receive_skb_internal(struct sk_buff *skb)
{
	net_timestamp_check(netdev_tstamp_prequeue, skb);

	if (skb_defer_rx_timestamp(skb))
		return NET_RX_SUCCESS;

#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu, ret;

		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow);

		if (cpu >= 0) {
			ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
			rcu_read_unlock();
			return ret;
		}
		rcu_read_unlock();
	}
#endif
	return __netif_receive_skb(skb);
}

/**
 * netif_receive_skb - process receive buffer from network
 * @skb: buffer to process
 *
 * netif_receive_skb() is the main receive data processing function.
 * It always succeeds. The buffer may be dropped during processing
 * for congestion control or by the protocol layers.
 *
 * This function may only be called from softirq context and interrupts
 * should be enabled.
 *
 * Return values (usually ignored):
 * NET_RX_SUCCESS: no congestion
 * NET_RX_DROP: packet was dropped
 */
int netif_receive_skb(struct sk_buff *skb)
{
	trace_netif_receive_skb_entry(skb);

	return netif_receive_skb_internal(skb);
}
EXPORT_SYMBOL(netif_receive_skb);
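
/* Usage sketch (assumed driver code, not part of this file): handing a
 * received frame to the stack. From softirq context (e.g. a NAPI poll)
 * netif_receive_skb() may be called directly; hard-irq paths use
 * netif_rx() instead. foo_build_skb is an assumed helper name.
 *
 *	skb = foo_build_skb(ring);
 *	skb->protocol = eth_type_trans(skb, netdev);
 *	netif_receive_skb(skb);
 */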

/* Network device is going away, flush any packets still pending
 * Called with irqs disabled.
 */
static void flush_backlog(void *arg)
{
	struct net_device *dev = arg;
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	struct sk_buff *skb, *tmp;

	rps_lock(sd);
	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->input_pkt_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
	rps_unlock(sd);

	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
		if (skb->dev == dev) {
			__skb_unlink(skb, &sd->process_queue);
			kfree_skb(skb);
			input_queue_head_incr(sd);
		}
	}
}

static int napi_gro_complete(struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = ptype->callbacks.gro_complete(skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return NET_RX_SUCCESS;
	}

out:
	return netif_receive_skb_internal(skb);
}

/* napi->gro_list contains packets ordered by age; the youngest packets
 * are at the head of the list.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	struct sk_buff *skb, *prev = NULL;

	/* scan list and build reverse chain */
	for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
		skb->prev = prev;
		prev = skb;
	}

	for (skb = prev; skb; skb = prev) {
		skb->next = NULL;

		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;

		prev = skb->prev;
		napi_gro_complete(skb);
		napi->gro_count--;
	}

	napi->gro_list = NULL;
}
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff *p;
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);

	for (p = napi->gro_list; p; p = p->next) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_tci ^ skb->vlan_tci;
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);
		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static void skb_gro_reset_offset(struct sk_buff *skb)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
	    pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	pinfo->frags[0].page_offset += grow;
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int same_flow;
	enum gro_result ret;
	int grow;

	if (!(skb->dev->features & NETIF_F_GRO))
		goto normal;

	if (skb_is_gso(skb) || skb_has_frag_list(skb))
		goto normal;

	gro_list_prepare(napi, skb);
	NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = 0;
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->udp_mark = 0;

		pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		struct sk_buff *nskb = *pp;

		*pp = nskb->next;
		nskb->next = NULL;
		napi_gro_complete(nskb);
		napi->gro_count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
		struct sk_buff *nskb = napi->gro_list;

		/* locate the end of the list to select the 'oldest' flow */
		while (nskb->next) {
			pp = &nskb->next;
			nskb = *pp;
		}
		*pp = NULL;
		nskb->next = NULL;
		napi_gro_complete(nskb);
	} else {
		napi->gro_count++;
	}
	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	skb->next = napi->gro_list;
	napi->gro_list = skb;
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
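
/* Usage sketch (assumed, simplified from what the in-tree protocol
 * offloads do): registering a struct packet_offload whose callbacks
 * dev_gro_receive()/napi_gro_complete() and the gro_find_*_by_type()
 * helpers above look up by EtherType. foo_gro_receive and
 * foo_gro_complete are illustrative names.
 *
 *	static struct packet_offload foo_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gro_receive  = foo_gro_receive,
 *			.gro_complete = foo_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&foo_offload);
 */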

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
		kfree_skb(skb);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			kmem_cache_free(skbuff_head_cache, skb);
		else
			__kfree_skb(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}
EXPORT_SYMBOL(napi_gro_receive);
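
/* Usage sketch (assumed driver code, not part of this file): the common
 * NAPI poll loop feeding received frames through GRO. foo_clean_rx and
 * the foo_priv layout are assumptions for the example.
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
 *		struct sk_buff *skb;
 *		int done = 0;
 *
 *		while (done < budget && (skb = foo_clean_rx(priv))) {
 *			skb->protocol = eth_type_trans(skb, priv->netdev);
 *			napi_gro_receive(napi, skb);
 *			done++;
 *		}
 *		if (done < budget)
 *			napi_complete(napi);
 *		return done;
 *	}
 */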

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	skb->vlan_tci = 0;
	skb->dev = napi->dev;
	skb->skb_iif = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
		napi->skb = skb;
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;

	case GRO_DROP:
	case GRO_MERGED_FREE:
		napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
		break;
	}

	return ret;
}

/* Upper GRO stack assumes network header starts at gro_offset=0
 * Drivers could call both napi_gro_frags() and napi_gro_receive()
 * We copy ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb);

	eth = skb_gro_header_fast(skb, 0);
	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi_frags_skb(napi);

	if (!skb)
		return GRO_DROP;

	trace_napi_gro_frags_entry(skb);

	return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
}
EXPORT_SYMBOL(napi_gro_frags);

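/* Usage sketch (assumed driver code, not part of this file): a page-based
 * receive path. The stack owns the skb between the two calls; the driver
 * only attaches page fragments to it.
 *
 *	skb = napi_get_frags(napi);
 *	if (unlikely(!skb))
 *		return;		// allocation failure, drop the frame
 *	skb_add_rx_frag(skb, 0, page, offset, frame_len, truesize);
 *	napi_gro_frags(napi);	// consumes (or recycles) the skb
 */
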
/*
 * net_rps_action_and_irq_enable sends any pending IPI's for rps.
 * Note: called with local irq disabled, but exits with local irq enabled.
 */
static void net_rps_action_and_irq_enable(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	struct softnet_data *remsd = sd->rps_ipi_list;

	if (remsd) {
		sd->rps_ipi_list = NULL;

		local_irq_enable();

		/* Send pending IPI's to kick RPS processing on remote cpus. */
		while (remsd) {
			struct softnet_data *next = remsd->rps_ipi_next;

			if (cpu_online(remsd->cpu))
				smp_call_function_single_async(remsd->cpu,
							       &remsd->csd);
			remsd = next;
		}
	} else
#endif
		local_irq_enable();
}

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);

#ifdef CONFIG_RPS
	/* Check if we have pending ipi, it's better to send them now,
	 * not waiting for net_rx_action() to end.
	 */
	if (sd->rps_ipi_list) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}
#endif
	napi->weight = weight_p;
	local_irq_disable();
	while (1) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			input_queue_head_incr(sd);
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) {
			/*
			 * Inline a custom version of __napi_complete().
			 * only current cpu owns and manipulates this napi,
			 * and NAPI_STATE_SCHED is the only possible flag set
			 * on backlog.
			 * We can use a plain write instead of clear_bit(),
			 * and we don't need an smp_mb() memory barrier.
			 */
			list_del(&napi->poll_list);
			napi->state = 0;
			rps_unlock(sd);

			break;
		}

		skb_queue_splice_tail_init(&sd->input_pkt_queue,
					   &sd->process_queue);
		rps_unlock(sd);
	}
	local_irq_enable();

	return work;
}

/**
 * __napi_schedule - schedule for receive
 * @n: entry to schedule
 *
 * The entry's receive function will be scheduled to run
 */
void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags);
	____napi_schedule(&__get_cpu_var(softnet_data), n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
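
/* Usage sketch (assumed driver code, not part of this file): the usual
 * interrupt-handler pattern built on napi_schedule(), which tests
 * NAPI_STATE_SCHED before calling __napi_schedule().
 *
 *	static irqreturn_t foo_isr(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		foo_disable_rx_irq(priv);	// silenced until poll re-enables
 *		napi_schedule(&priv->napi);	// runs foo_poll() in NET_RX softirq
 *		return IRQ_HANDLED;
 *	}
 */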

void __napi_complete(struct napi_struct *n)
{
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	BUG_ON(n->gro_list);

	list_del(&n->poll_list);
	smp_mb__before_atomic();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
EXPORT_SYMBOL(__napi_complete);

void napi_complete(struct napi_struct *n)
{
	unsigned long flags;

	/*
	 * don't let napi dequeue from the cpu poll list
	 * just in case it's running on a different cpu
	 */
	if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
		return;

	napi_gro_flush(n, false);
	local_irq_save(flags);
	__napi_complete(n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(napi_complete);

/* must be called under rcu_read_lock(), as we don't take a reference */
struct napi_struct *napi_by_id(unsigned int napi_id)
{
	unsigned int hash = napi_id % HASH_SIZE(napi_hash);
	struct napi_struct *napi;

	hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
		if (napi->napi_id == napi_id)
			return napi;

	return NULL;
}
EXPORT_SYMBOL_GPL(napi_by_id);

void napi_hash_add(struct napi_struct *napi)
{
	if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {

		spin_lock(&napi_hash_lock);

		/* 0 is not a valid id, and we also skip an id that is
		 * already taken; we expect both events to be extremely rare
		 */
		napi->napi_id = 0;
		while (!napi->napi_id) {
			napi->napi_id = ++napi_gen_id;
			if (napi_by_id(napi->napi_id))
				napi->napi_id = 0;
		}

		hlist_add_head_rcu(&napi->napi_hash_node,
			&napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);

		spin_unlock(&napi_hash_lock);
	}
}
EXPORT_SYMBOL_GPL(napi_hash_add);

/* Warning: the caller is responsible for making sure an RCU grace period
 * is respected before freeing the memory containing @napi
 */
void napi_hash_del(struct napi_struct *napi)
{
	spin_lock(&napi_hash_lock);

	if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
		hlist_del_rcu(&napi->napi_hash_node);

	spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_del);

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	INIT_LIST_HEAD(&napi->poll_list);
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll;
	if (weight > NAPI_POLL_WEIGHT)
		pr_err_once("netif_napi_add() called with weight %d on device %s\n",
			    weight, dev->name);
	napi->weight = weight;
	list_add(&napi->dev_list, &dev->napi_list);
	napi->dev = dev;
#ifdef CONFIG_NETPOLL
	spin_lock_init(&napi->poll_lock);
	napi->poll_owner = -1;
#endif
	set_bit(NAPI_STATE_SCHED, &napi->state);
}
EXPORT_SYMBOL(netif_napi_add);
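
/* Usage sketch (assumed driver code, not part of this file): registering
 * the poll callback at probe time and enabling it later.
 *
 *	netif_napi_add(netdev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 *	...
 *	napi_enable(&priv->napi);	// typically from ndo_open
 */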

void netif_napi_del(struct napi_struct *napi)
{
	list_del_init(&napi->dev_list);
	napi_free_frags(napi);

	kfree_skb_list(napi->gro_list);
	napi->gro_list = NULL;
	napi->gro_count = 0;
}
EXPORT_SYMBOL(netif_napi_del);

static void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = &__get_cpu_var(softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget;
	void *have;

	local_irq_disable();

	while (!list_empty(&sd->poll_list)) {
		struct napi_struct *n;
		int work, weight;

		/* If the softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies, which allows
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
			goto softnet_break;

		local_irq_enable();

		/* Even though interrupts have been re-enabled, this
		 * access is safe because interrupts can only add new
		 * entries to the tail of this list, and only ->poll()
		 * calls can remove this head entry from the list.
		 */
		n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);

		have = netpoll_poll_lock(n);

		weight = n->weight;

		/* This NAPI_STATE_SCHED test is for avoiding a race
		 * with netpoll's poll_napi(). Only the entity which
		 * obtains the lock and sees NAPI_STATE_SCHED set will
		 * actually make the ->poll() call. Therefore we avoid
		 * accidentally calling ->poll() when NAPI is not scheduled.
		 */
		work = 0;
		if (test_bit(NAPI_STATE_SCHED, &n->state)) {
			work = n->poll(n, weight);
			trace_napi_poll(n);
		}

		WARN_ON_ONCE(work > weight);

		budget -= work;

		local_irq_disable();

		/* Drivers must not modify the NAPI state if they
		 * consume the entire weight. In such cases this code
		 * still "owns" the NAPI instance and therefore can
		 * move the instance around on the list at-will.
		 */
		if (unlikely(work == weight)) {
			if (unlikely(napi_disable_pending(n))) {
				local_irq_enable();
				napi_complete(n);
				local_irq_disable();
			} else {
				if (n->gro_list) {
					/* flush too old packets
					 * If HZ < 1000, flush all packets.
					 */
					local_irq_enable();
					napi_gro_flush(n, HZ >= 1000);
					local_irq_disable();
				}
				list_move_tail(&n->poll_list, &sd->poll_list);
			}
		}

		netpoll_poll_unlock(have);
	}
out:
	net_rps_action_and_irq_enable(sd);

#ifdef CONFIG_NET_DMA
	/*
	 * There may not be any more sk_buffs coming right now, so push
	 * any pending DMA copies to hardware
	 */
	dma_issue_pending_all();
#endif

	return;

softnet_break:
	sd->time_squeeze++;
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
	goto out;
}

struct netdev_adjacent {
	struct net_device *dev;

	/* upper master flag, there can only be one master device per list */
	bool master;

	/* counter for the number of times this device was added to us */
	u16 ref_nr;

	/* private field for the users */
	void *private;

	struct list_head list;
	struct rcu_head rcu;
};

static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
						 struct net_device *adj_dev,
						 struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	list_for_each_entry(adj, adj_list, list) {
		if (adj->dev == adj_dev)
			return adj;
	}
	return NULL;
}

/**
 * netdev_has_upper_dev - Check if device is linked to an upper device
 * @dev: device
 * @upper_dev: upper device to check
 *
 * Find out if a device is linked to specified upper device and return true
 * in case it is. Note that this checks only immediate upper device,
 * not through a complete stack of devices. The caller must hold the RTNL lock.
 */
bool netdev_has_upper_dev(struct net_device *dev,
			  struct net_device *upper_dev)
{
	ASSERT_RTNL();

	return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
}
EXPORT_SYMBOL(netdev_has_upper_dev);

/**
 * netdev_has_any_upper_dev - Check if device is linked to some device
 * @dev: device
 *
 * Find out if a device is linked to an upper device and return true in case
 * it is. The caller must hold the RTNL lock.
 */
static bool netdev_has_any_upper_dev(struct net_device *dev)
{
	ASSERT_RTNL();

	return !list_empty(&dev->all_adj_list.upper);
}

/**
 * netdev_master_upper_dev_get - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RTNL lock.
 */
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	ASSERT_RTNL();

	if (list_empty(&dev->adj_list.upper))
		return NULL;

	upper = list_first_entry(&dev->adj_list.upper,
				 struct netdev_adjacent, list);
	if (likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);

void *netdev_adjacent_get_private(struct list_head *adj_list)
{
	struct netdev_adjacent *adj;

	adj = list_entry(adj_list, struct netdev_adjacent, list);

	return adj->private;
}
EXPORT_SYMBOL(netdev_adjacent_get_private);

/**
 * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's upper list, starting from iter
 * position. The caller must hold RCU read lock.
 */
struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
						 struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
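
/* Usage sketch (illustrative, not part of this file): walking a device's
 * immediate upper devices under RCU with the iterator above.
 *
 *	struct list_head *iter = &dev->adj_list.upper;
 *	struct net_device *upper;
 *
 *	rcu_read_lock();
 *	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)))
 *		pr_debug("%s is upper of %s\n", upper->name, dev->name);
 *	rcu_read_unlock();
 */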

/**
 * netdev_all_upper_get_next_dev_rcu - Get the next dev from the all upper list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next device from the dev's all upper list (which, unlike the
 * neighbour-only variant above, also includes non-neighbour uppers),
 * starting from iter position. The caller must hold RCU read lock.
 */
struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
						     struct list_head **iter)
{
	struct netdev_adjacent *upper;

	WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());

	upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&upper->list == &dev->all_adj_list.upper)
		return NULL;

	*iter = &upper->list;

	return upper->dev;
}
EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);

/**
 * netdev_lower_get_next_private - Get the next ->private from the
 *				   lower neighbour list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must either hold the
 * RTNL lock or its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next_private(struct net_device *dev,
				    struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry(*iter, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = lower->list.next;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private);

/**
 * netdev_lower_get_next_private_rcu - Get the next ->private from the
 *				       lower neighbour list, RCU variant
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent->private from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RCU read lock.
 */
void *netdev_lower_get_next_private_rcu(struct net_device *dev,
					struct list_head **iter)
{
	struct netdev_adjacent *lower;

	WARN_ON_ONCE(!rcu_read_lock_held());

	lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);

/**
 * netdev_lower_get_next - Get the next device from the lower neighbour
 *			   list
 * @dev: device
 * @iter: list_head ** of the current position
 *
 * Gets the next netdev_adjacent from the dev's lower neighbour
 * list, starting from iter position. The caller must hold RTNL lock or
 * its own locking that guarantees that the neighbour lower
 * list will remain unchanged.
 */
void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
{
	struct netdev_adjacent *lower;

	lower = list_entry((*iter)->next, struct netdev_adjacent, list);

	if (&lower->list == &dev->adj_list.lower)
		return NULL;

	*iter = &lower->list;

	return lower->dev;
}
EXPORT_SYMBOL(netdev_lower_get_next);

/**
 * netdev_lower_get_first_private_rcu - Get the first ->private from the
 *					lower neighbour list, RCU variant
 * @dev: device
 *
 * Gets the first netdev_adjacent->private from the dev's lower neighbour
 * list. The caller must hold RCU read lock.
 */
void *netdev_lower_get_first_private_rcu(struct net_device *dev)
{
	struct netdev_adjacent *lower;

	lower = list_first_or_null_rcu(&dev->adj_list.lower,
				       struct netdev_adjacent, list);
	if (lower)
		return lower->private;
	return NULL;
}
EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);

/**
 * netdev_master_upper_dev_get_rcu - Get master upper device
 * @dev: device
 *
 * Find a master upper device and return pointer to it or NULL in case
 * it's not there. The caller must hold the RCU read lock.
 */
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
	struct netdev_adjacent *upper;

	upper = list_first_or_null_rcu(&dev->adj_list.upper,
				       struct netdev_adjacent, list);
	if (upper && likely(upper->master))
		return upper->dev;
	return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);

static int netdev_adjacent_sysfs_add(struct net_device *dev,
				     struct net_device *adj_dev,
				     struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", adj_dev->name);
	return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
				 linkname);
}

static void netdev_adjacent_sysfs_del(struct net_device *dev,
				      char *name,
				      struct list_head *dev_list)
{
	char linkname[IFNAMSIZ+7];

	sprintf(linkname, dev_list == &dev->adj_list.upper ?
		"upper_%s" : "lower_%s", name);
	sysfs_remove_link(&(dev->dev.kobj), linkname);
}

#define netdev_adjacent_is_neigh_list(dev, dev_list) \
		(dev_list == &dev->adj_list.upper || \
		 dev_list == &dev->adj_list.lower)

static int __netdev_adjacent_dev_insert(struct net_device *dev,
					struct net_device *adj_dev,
					struct list_head *dev_list,
					void *private, bool master)
{
	struct netdev_adjacent *adj;
	int ret;

	adj = __netdev_find_adj(dev, adj_dev, dev_list);

	if (adj) {
		adj->ref_nr++;
		return 0;
	}

	adj = kmalloc(sizeof(*adj), GFP_KERNEL);
	if (!adj)
		return -ENOMEM;

	adj->dev = adj_dev;
	adj->master = master;
	adj->ref_nr = 1;
	adj->private = private;
	dev_hold(adj_dev);

	pr_debug("dev_hold for %s, because of link added from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);

	if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
		ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
		if (ret)
			goto free_adj;
	}

	/* Ensure that master link is always the first item in list. */
	if (master) {
		ret = sysfs_create_link(&(dev->dev.kobj),
					&(adj_dev->dev.kobj), "master");
		if (ret)
			goto remove_symlinks;

		list_add_rcu(&adj->list, dev_list);
	} else {
		list_add_tail_rcu(&adj->list, dev_list);
	}

	return 0;

remove_symlinks:
	if (netdev_adjacent_is_neigh_list(dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
free_adj:
	kfree(adj);
	dev_put(adj_dev);

	return ret;
}

static void __netdev_adjacent_dev_remove(struct net_device *dev,
					 struct net_device *adj_dev,
					 struct list_head *dev_list)
{
	struct netdev_adjacent *adj;

	adj = __netdev_find_adj(dev, adj_dev, dev_list);

	if (!adj) {
		pr_err("tried to remove device %s from %s\n",
		       dev->name, adj_dev->name);
		BUG();
	}

	if (adj->ref_nr > 1) {
		pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
			 adj->ref_nr-1);
		adj->ref_nr--;
		return;
	}

	if (adj->master)
		sysfs_remove_link(&(dev->dev.kobj), "master");

	if (netdev_adjacent_is_neigh_list(dev, dev_list))
		netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);

	list_del_rcu(&adj->list);
	pr_debug("dev_put for %s, because link removed from %s to %s\n",
		 adj_dev->name, dev->name, adj_dev->name);
	dev_put(adj_dev);
	kfree_rcu(adj, rcu);
}

static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
					    struct net_device *upper_dev,
					    struct list_head *up_list,
					    struct list_head *down_list,
					    void *private, bool master)
{
	int ret;

	ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
					   master);
	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
					   false);
	if (ret) {
		__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
		return ret;
	}

	return 0;
}

static int __netdev_adjacent_dev_link(struct net_device *dev,
				      struct net_device *upper_dev)
{
	return __netdev_adjacent_dev_link_lists(dev, upper_dev,
						&dev->all_adj_list.upper,
						&upper_dev->all_adj_list.lower,
						NULL, false);
}

static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
					       struct net_device *upper_dev,
					       struct list_head *up_list,
					       struct list_head *down_list)
{
	__netdev_adjacent_dev_remove(dev, upper_dev, up_list);
	__netdev_adjacent_dev_remove(upper_dev, dev, down_list);
}

static void __netdev_adjacent_dev_unlink(struct net_device *dev,
					 struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->all_adj_list.upper,
					   &upper_dev->all_adj_list.lower);
}

static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
						struct net_device *upper_dev,
						void *private, bool master)
{
	int ret = __netdev_adjacent_dev_link(dev, upper_dev);

	if (ret)
		return ret;

	ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
					       &dev->adj_list.upper,
					       &upper_dev->adj_list.lower,
					       private, master);
	if (ret) {
		__netdev_adjacent_dev_unlink(dev, upper_dev);
		return ret;
	}

	return 0;
}

static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
						   struct net_device *upper_dev)
{
	__netdev_adjacent_dev_unlink(dev, upper_dev);
	__netdev_adjacent_dev_unlink_lists(dev, upper_dev,
					   &dev->adj_list.upper,
					   &upper_dev->adj_list.lower);
}

static int __netdev_upper_dev_link(struct net_device *dev,
				   struct net_device *upper_dev, bool master,
				   void *private)
{
	struct netdev_adjacent *i, *j, *to_i, *to_j;
	int ret = 0;

	ASSERT_RTNL();

	if (dev == upper_dev)
		return -EBUSY;

	/* To prevent loops, check if dev is not upper device to upper_dev. */
	if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
		return -EBUSY;

	if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
		return -EEXIST;

	if (master && netdev_master_upper_dev_get(dev))
		return -EBUSY;

	ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
						   master);
	if (ret)
		return ret;

	/* Now that we linked these devs, make all the upper_dev's
	 * all_adj_list.upper visible to every dev's all_adj_list.lower and
	 * vice versa, and don't forget the devices themselves. All of these
	 * links are non-neighbours.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			pr_debug("Interlinking %s with %s, non-neighbour\n",
				 i->dev->name, j->dev->name);
			ret = __netdev_adjacent_dev_link(i->dev, j->dev);
			if (ret)
				goto rollback_mesh;
		}
	}

	/* add dev to every upper_dev's upper device */
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		pr_debug("linking %s's upper device %s with %s\n",
			 upper_dev->name, i->dev->name, dev->name);
		ret = __netdev_adjacent_dev_link(dev, i->dev);
		if (ret)
			goto rollback_upper_mesh;
	}

	/* add upper_dev to every dev's lower device */
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		pr_debug("linking %s's lower device %s with %s\n", dev->name,
			 i->dev->name, upper_dev->name);
		ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
		if (ret)
			goto rollback_lower_mesh;
	}

	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
	return 0;

rollback_lower_mesh:
	to_i = i;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);
	}

	i = NULL;

rollback_upper_mesh:
	to_i = i;
	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
		if (i == to_i)
			break;
		__netdev_adjacent_dev_unlink(dev, i->dev);
	}

	i = j = NULL;

rollback_mesh:
	to_i = i;
	to_j = j;
	list_for_each_entry(i, &dev->all_adj_list.lower, list) {
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
			if (i == to_i && j == to_j)
				break;
			__netdev_adjacent_dev_unlink(i->dev, j->dev);
		}
		if (i == to_i)
			break;
	}

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	return ret;
}

/**
 * netdev_upper_dev_link - Add a link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. The caller must hold
 * the RTNL lock. On a failure a negative errno code is returned.
 * On success the reference counts are adjusted and the function
 * returns zero.
 */
int netdev_upper_dev_link(struct net_device *dev,
			  struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
}
EXPORT_SYMBOL(netdev_upper_dev_link);
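
/* Usage sketch (assumed, modelled on what stacked drivers such as bonding
 * and team do; the bond/slave naming is illustrative). All calls run under
 * the RTNL lock:
 *
 *	err = netdev_master_upper_dev_link(slave_dev, bond_dev);
 *	if (err)
 *		goto err_unwind;
 *	...
 *	netdev_upper_dev_unlink(slave_dev, bond_dev);	// on release
 */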

/**
 * netdev_master_upper_dev_link - Add a master link to the upper device
 * @dev: device
 * @upper_dev: new upper device
 *
 * Adds a link to device which is upper to this one. In this case, only
 * one master upper device can be linked, although other non-master devices
 * might be linked as well. The caller must hold the RTNL lock.
 * On a failure a negative errno code is returned. On success the reference
 * counts are adjusted and the function returns zero.
 */
int netdev_master_upper_dev_link(struct net_device *dev,
				 struct net_device *upper_dev)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link);

int netdev_master_upper_dev_link_private(struct net_device *dev,
					 struct net_device *upper_dev,
					 void *private)
{
	return __netdev_upper_dev_link(dev, upper_dev, true, private);
}
EXPORT_SYMBOL(netdev_master_upper_dev_link_private);

/**
 * netdev_upper_dev_unlink - Removes a link to upper device
 * @dev: device
 * @upper_dev: upper device to unlink
 *
 * Removes a link to device which is upper to this one. The caller must hold
 * the RTNL lock.
 */
void netdev_upper_dev_unlink(struct net_device *dev,
			     struct net_device *upper_dev)
{
	struct netdev_adjacent *i, *j;

	ASSERT_RTNL();

	__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);

	/* Here is the tricky part. We must remove all dev's lower
	 * devices from all upper_dev's upper devices and vice
	 * versa, to maintain the graph relationship.
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
			__netdev_adjacent_dev_unlink(i->dev, j->dev);

	/* also remove the devices themselves from the lower/upper device
	 * lists
	 */
	list_for_each_entry(i, &dev->all_adj_list.lower, list)
		__netdev_adjacent_dev_unlink(i->dev, upper_dev);

	list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
		__netdev_adjacent_dev_unlink(dev, i->dev);

	call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);

void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
{
	struct netdev_adjacent *iter;

	list_for_each_entry(iter, &dev->adj_list.upper, list) {
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.lower);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.lower);
	}

	list_for_each_entry(iter, &dev->adj_list.lower, list) {
		netdev_adjacent_sysfs_del(iter->dev, oldname,
					  &iter->dev->adj_list.upper);
		netdev_adjacent_sysfs_add(iter->dev, dev,
					  &iter->dev->adj_list.upper);
	}
}

void *netdev_lower_dev_get_private(struct net_device *dev,
				   struct net_device *lower_dev)
{
	struct netdev_adjacent *lower;

	if (!lower_dev)
		return NULL;
	lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
	if (!lower)
		return NULL;

	return lower->private;
}
EXPORT_SYMBOL(netdev_lower_dev_get_private);

int dev_get_nest_level(struct net_device *dev,
		       bool (*type_check)(struct net_device *dev))
{
	struct net_device *lower = NULL;
	struct list_head *iter;
	int max_nest = -1;
	int nest;

	ASSERT_RTNL();

	netdev_for_each_lower_dev(dev, lower, iter) {
		nest = dev_get_nest_level(lower, type_check);
		if (max_nest < nest)
			max_nest = nest;
	}

	if (type_check(dev))
		max_nest++;

	return max_nest;
}
EXPORT_SYMBOL(dev_get_nest_level);

static void dev_change_rx_flags(struct net_device *dev, int flags)
{
	const struct net_device_ops *ops = dev->netdev_ops;

	if (ops->ndo_change_rx_flags)
		ops->ndo_change_rx_flags(dev, flags);
}

static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
{
	unsigned int old_flags = dev->flags;
	kuid_t uid;
	kgid_t gid;

	ASSERT_RTNL();

	dev->flags |= IFF_PROMISC;
	dev->promiscuity += inc;
	if (dev->promiscuity == 0) {
		/*
		 * Avoid overflow.
		 * If inc causes overflow, untouch promisc and return error.
		 */
		if (inc < 0)
			dev->flags &= ~IFF_PROMISC;
		else {
			dev->promiscuity -= inc;
			pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
				dev->name);
			return -EOVERFLOW;
		}
	}
	if (dev->flags != old_flags) {
		pr_info("device %s %s promiscuous mode\n",
			dev->name,
			dev->flags & IFF_PROMISC ? "entered" : "left");
		if (audit_enabled) {
			current_uid_gid(&uid, &gid);
			audit_log(current->audit_context, GFP_ATOMIC,
				  AUDIT_ANOM_PROMISCUOUS,
				  "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
				  dev->name, (dev->flags & IFF_PROMISC),
				  (old_flags & IFF_PROMISC),
				  from_kuid(&init_user_ns, audit_get_loginuid(current)),
				  from_kuid(&init_user_ns, uid),
				  from_kgid(&init_user_ns, gid),
				  audit_get_sessionid(current));
		}

		dev_change_rx_flags(dev, IFF_PROMISC);
	}
	if (notify)
		__dev_notify_flags(dev, old_flags, IFF_PROMISC);
	return 0;
}

4417da66
PM
5253/**
5254 * dev_set_promiscuity - update promiscuity count on a device
5255 * @dev: device
5256 * @inc: modifier
5257 *
5258 * Add or remove promiscuity from a device. While the count in the device
5259 * remains above zero the interface remains promiscuous. Once it hits zero
5260 * the device reverts back to normal filtering operation. A negative inc
5261 * value is used to drop promiscuity on the device.
dad9b335 5262 * Return 0 if successful or a negative errno code on error.
4417da66 5263 */
dad9b335 5264int dev_set_promiscuity(struct net_device *dev, int inc)
4417da66 5265{
b536db93 5266 unsigned int old_flags = dev->flags;
dad9b335 5267 int err;
4417da66 5268
991fb3f7 5269 err = __dev_set_promiscuity(dev, inc, true);
4b5a698e 5270 if (err < 0)
dad9b335 5271 return err;
4417da66
PM
5272 if (dev->flags != old_flags)
5273 dev_set_rx_mode(dev);
dad9b335 5274 return err;
4417da66 5275}
d1b19dff 5276EXPORT_SYMBOL(dev_set_promiscuity);
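/* Editor's example (a sketch, not part of dev.c): a hypothetical packet
 * capture driver holding one promiscuity reference while capturing; the
 * function names are invented, and RTNL must be held around each call.
 */
static int example_capture_start(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* take one reference */
	rtnl_unlock();
	return err;
}

static void example_capture_stop(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop the reference */
	rtnl_unlock();
}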
4417da66 5277
991fb3f7 5278static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
1da177e4 5279{
991fb3f7 5280 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
1da177e4 5281
24023451
PM
5282 ASSERT_RTNL();
5283
1da177e4 5284 dev->flags |= IFF_ALLMULTI;
dad9b335
WC
5285 dev->allmulti += inc;
5286 if (dev->allmulti == 0) {
5287 /*
5288 * Avoid overflow.
5289 * If inc would cause overflow, leave allmulti untouched and return an error.
5290 */
5291 if (inc < 0)
5292 dev->flags &= ~IFF_ALLMULTI;
5293 else {
5294 dev->allmulti -= inc;
7b6cd1ce
JP
5295 pr_warn("%s: allmulti counter overflowed, allmulti mode was not set; the allmulti feature of this device may be broken.\n",
5296 dev->name);
dad9b335
WC
5297 return -EOVERFLOW;
5298 }
5299 }
24023451 5300 if (dev->flags ^ old_flags) {
b6c40d68 5301 dev_change_rx_flags(dev, IFF_ALLMULTI);
4417da66 5302 dev_set_rx_mode(dev);
991fb3f7
ND
5303 if (notify)
5304 __dev_notify_flags(dev, old_flags,
5305 dev->gflags ^ old_gflags);
24023451 5306 }
dad9b335 5307 return 0;
4417da66 5308}
991fb3f7
ND
5309
5310/**
5311 * dev_set_allmulti - update allmulti count on a device
5312 * @dev: device
5313 * @inc: modifier
5314 *
5315 * Add or remove reception of all multicast frames to a device. While the
5316 * count in the device remains above zero the interface remains listening
5317 * to all multicast frames. Once it hits zero the device reverts to normal
5318 * filtering operation. A negative @inc value is used to drop the counter
5319 * when releasing a resource needing all multicasts.
5320 * Return 0 if successful or a negative errno code on error.
5321 */
5322
5323int dev_set_allmulti(struct net_device *dev, int inc)
5324{
5325 return __dev_set_allmulti(dev, inc, true);
5326}
d1b19dff 5327EXPORT_SYMBOL(dev_set_allmulti);
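/* Editor's example (a sketch, not part of dev.c): an upper device that
 * needs every multicast frame from its lower device takes an allmulti
 * reference for the lifetime of the binding; the names are hypothetical
 * and the caller is assumed to already hold RTNL.
 */
static int example_bind_lower(struct net_device *lower)
{
	ASSERT_RTNL();
	return dev_set_allmulti(lower, 1);	/* count up */
}

static void example_unbind_lower(struct net_device *lower)
{
	ASSERT_RTNL();
	dev_set_allmulti(lower, -1);		/* count back down */
}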
4417da66
PM
5328
5329/*
5330 * Upload unicast and multicast address lists to device and
5331 * configure RX filtering. When the device doesn't support unicast
53ccaae1 5332 * filtering it is put in promiscuous mode while unicast addresses
4417da66
PM
5333 * are present.
5334 */
5335void __dev_set_rx_mode(struct net_device *dev)
5336{
d314774c
SH
5337 const struct net_device_ops *ops = dev->netdev_ops;
5338
4417da66
PM
5339 /* dev_open will call this function so the list will stay sane. */
5340 if (!(dev->flags&IFF_UP))
5341 return;
5342
5343 if (!netif_device_present(dev))
40b77c94 5344 return;
4417da66 5345
01789349 5346 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4417da66
PM
5347 /* Unicast address changes may only happen under the rtnl,
5348 * therefore calling __dev_set_promiscuity here is safe.
5349 */
32e7bfc4 5350 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
991fb3f7 5351 __dev_set_promiscuity(dev, 1, false);
2d348d1f 5352 dev->uc_promisc = true;
32e7bfc4 5353 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
991fb3f7 5354 __dev_set_promiscuity(dev, -1, false);
2d348d1f 5355 dev->uc_promisc = false;
4417da66 5356 }
4417da66 5357 }
01789349
JP
5358
5359 if (ops->ndo_set_rx_mode)
5360 ops->ndo_set_rx_mode(dev);
4417da66
PM
5361}
5362
5363void dev_set_rx_mode(struct net_device *dev)
5364{
b9e40857 5365 netif_addr_lock_bh(dev);
4417da66 5366 __dev_set_rx_mode(dev);
b9e40857 5367 netif_addr_unlock_bh(dev);
1da177e4
LT
5368}
5369
f0db275a
SH
5370/**
5371 * dev_get_flags - get flags reported to userspace
5372 * @dev: device
5373 *
5374 * Get the combination of flag bits exported through APIs to userspace.
5375 */
95c96174 5376unsigned int dev_get_flags(const struct net_device *dev)
1da177e4 5377{
95c96174 5378 unsigned int flags;
1da177e4
LT
5379
5380 flags = (dev->flags & ~(IFF_PROMISC |
5381 IFF_ALLMULTI |
b00055aa
SR
5382 IFF_RUNNING |
5383 IFF_LOWER_UP |
5384 IFF_DORMANT)) |
1da177e4
LT
5385 (dev->gflags & (IFF_PROMISC |
5386 IFF_ALLMULTI));
5387
b00055aa
SR
5388 if (netif_running(dev)) {
5389 if (netif_oper_up(dev))
5390 flags |= IFF_RUNNING;
5391 if (netif_carrier_ok(dev))
5392 flags |= IFF_LOWER_UP;
5393 if (netif_dormant(dev))
5394 flags |= IFF_DORMANT;
5395 }
1da177e4
LT
5396
5397 return flags;
5398}
d1b19dff 5399EXPORT_SYMBOL(dev_get_flags);
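/* Editor's example (a sketch, not part of dev.c): testing the userspace
 * view of an interface, combining the administrative IFF_UP bit with the
 * runtime IFF_RUNNING bit that dev_get_flags() synthesizes above.
 */
static bool example_if_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & IFF_UP) && (flags & IFF_RUNNING);
}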
1da177e4 5400
bd380811 5401int __dev_change_flags(struct net_device *dev, unsigned int flags)
1da177e4 5402{
b536db93 5403 unsigned int old_flags = dev->flags;
bd380811 5404 int ret;
1da177e4 5405
24023451
PM
5406 ASSERT_RTNL();
5407
1da177e4
LT
5408 /*
5409 * Set the flags on our device.
5410 */
5411
5412 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5413 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5414 IFF_AUTOMEDIA)) |
5415 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5416 IFF_ALLMULTI));
5417
5418 /*
5419 * Load in the correct multicast list now the flags have changed.
5420 */
5421
b6c40d68
PM
5422 if ((old_flags ^ flags) & IFF_MULTICAST)
5423 dev_change_rx_flags(dev, IFF_MULTICAST);
24023451 5424
4417da66 5425 dev_set_rx_mode(dev);
1da177e4
LT
5426
5427 /*
5428 * Have we downed the interface? We handle IFF_UP ourselves
5429 * according to user attempts to set it, rather than blindly
5430 * setting it.
5431 */
5432
5433 ret = 0;
5434 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
bd380811 5435 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
1da177e4
LT
5436
5437 if (!ret)
4417da66 5438 dev_set_rx_mode(dev);
1da177e4
LT
5439 }
5440
1da177e4 5441 if ((flags ^ dev->gflags) & IFF_PROMISC) {
d1b19dff 5442 int inc = (flags & IFF_PROMISC) ? 1 : -1;
991fb3f7 5443 unsigned int old_flags = dev->flags;
d1b19dff 5444
1da177e4 5445 dev->gflags ^= IFF_PROMISC;
991fb3f7
ND
5446
5447 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5448 if (dev->flags != old_flags)
5449 dev_set_rx_mode(dev);
1da177e4
LT
5450 }
5451
5452 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5453 is important. Some (broken) drivers set IFF_PROMISC when
5454 IFF_ALLMULTI is requested, without asking us and without reporting it.
5455 */
5456 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
d1b19dff
ED
5457 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5458
1da177e4 5459 dev->gflags ^= IFF_ALLMULTI;
991fb3f7 5460 __dev_set_allmulti(dev, inc, false);
1da177e4
LT
5461 }
5462
bd380811
PM
5463 return ret;
5464}
5465
a528c219
ND
5466void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5467 unsigned int gchanges)
bd380811
PM
5468{
5469 unsigned int changes = dev->flags ^ old_flags;
5470
a528c219 5471 if (gchanges)
7f294054 5472 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
a528c219 5473
bd380811
PM
5474 if (changes & IFF_UP) {
5475 if (dev->flags & IFF_UP)
5476 call_netdevice_notifiers(NETDEV_UP, dev);
5477 else
5478 call_netdevice_notifiers(NETDEV_DOWN, dev);
5479 }
5480
5481 if (dev->flags & IFF_UP &&
be9efd36
JP
5482 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5483 struct netdev_notifier_change_info change_info;
5484
5485 change_info.flags_changed = changes;
5486 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5487 &change_info.info);
5488 }
bd380811
PM
5489}
5490
5491/**
5492 * dev_change_flags - change device settings
5493 * @dev: device
5494 * @flags: device state flags
5495 *
5496 * Change settings on device based state flags. The flags are
5497 * in the userspace exported format.
5498 */
b536db93 5499int dev_change_flags(struct net_device *dev, unsigned int flags)
bd380811 5500{
b536db93 5501 int ret;
991fb3f7 5502 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
bd380811
PM
5503
5504 ret = __dev_change_flags(dev, flags);
5505 if (ret < 0)
5506 return ret;
5507
991fb3f7 5508 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
a528c219 5509 __dev_notify_flags(dev, old_flags, changes);
1da177e4
LT
5510 return ret;
5511}
d1b19dff 5512EXPORT_SYMBOL(dev_change_flags);
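/* Editor's example (a sketch, not part of dev.c): administratively
 * bringing an interface up the way an ioctl handler would, by feeding
 * the userspace-format flags back in with IFF_UP set.
 */
static int example_bring_up(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
	rtnl_unlock();
	return err;
}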
1da177e4 5513
2315dc91
VF
5514static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5515{
5516 const struct net_device_ops *ops = dev->netdev_ops;
5517
5518 if (ops->ndo_change_mtu)
5519 return ops->ndo_change_mtu(dev, new_mtu);
5520
5521 dev->mtu = new_mtu;
5522 return 0;
5523}
5524
f0db275a
SH
5525/**
5526 * dev_set_mtu - Change maximum transfer unit
5527 * @dev: device
5528 * @new_mtu: new transfer unit
5529 *
5530 * Change the maximum transfer size of the network device.
5531 */
1da177e4
LT
5532int dev_set_mtu(struct net_device *dev, int new_mtu)
5533{
2315dc91 5534 int err, orig_mtu;
1da177e4
LT
5535
5536 if (new_mtu == dev->mtu)
5537 return 0;
5538
5539 /* MTU must be positive. */
5540 if (new_mtu < 0)
5541 return -EINVAL;
5542
5543 if (!netif_device_present(dev))
5544 return -ENODEV;
5545
1d486bfb
VF
5546 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5547 err = notifier_to_errno(err);
5548 if (err)
5549 return err;
d314774c 5550
2315dc91
VF
5551 orig_mtu = dev->mtu;
5552 err = __dev_set_mtu(dev, new_mtu);
d314774c 5553
2315dc91
VF
5554 if (!err) {
5555 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5556 err = notifier_to_errno(err);
5557 if (err) {
5558 /* setting mtu back and notifying everyone again,
5559 * so that they have a chance to revert changes.
5560 */
5561 __dev_set_mtu(dev, orig_mtu);
5562 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5563 }
5564 }
1da177e4
LT
5565 return err;
5566}
d1b19dff 5567EXPORT_SYMBOL(dev_set_mtu);
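/* Editor's example (a sketch, not part of dev.c): requesting a jumbo
 * MTU and reporting failure; 9000 is an illustrative value and the
 * device's ndo_change_mtu may well reject it.
 */
static int example_enable_jumbo(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, 9000);
	rtnl_unlock();
	if (err)
		netdev_warn(dev, "jumbo MTU rejected: %d\n", err);
	return err;
}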
1da177e4 5568
cbda10fa
VD
5569/**
5570 * dev_set_group - Change group this device belongs to
5571 * @dev: device
5572 * @new_group: group this device should belong to
5573 */
5574void dev_set_group(struct net_device *dev, int new_group)
5575{
5576 dev->group = new_group;
5577}
5578EXPORT_SYMBOL(dev_set_group);
5579
f0db275a
SH
5580/**
5581 * dev_set_mac_address - Change Media Access Control Address
5582 * @dev: device
5583 * @sa: new address
5584 *
5585 * Change the hardware (MAC) address of the device
5586 */
1da177e4
LT
5587int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5588{
d314774c 5589 const struct net_device_ops *ops = dev->netdev_ops;
1da177e4
LT
5590 int err;
5591
d314774c 5592 if (!ops->ndo_set_mac_address)
1da177e4
LT
5593 return -EOPNOTSUPP;
5594 if (sa->sa_family != dev->type)
5595 return -EINVAL;
5596 if (!netif_device_present(dev))
5597 return -ENODEV;
d314774c 5598 err = ops->ndo_set_mac_address(dev, sa);
f6521516
JP
5599 if (err)
5600 return err;
fbdeca2d 5601 dev->addr_assign_type = NET_ADDR_SET;
f6521516 5602 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
7bf23575 5603 add_device_randomness(dev->dev_addr, dev->addr_len);
f6521516 5604 return 0;
1da177e4 5605}
d1b19dff 5606EXPORT_SYMBOL(dev_set_mac_address);
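/* Editor's example (a sketch, not part of dev.c): programming a new
 * hardware address from a raw byte buffer. sa_family must match
 * dev->type (e.g. ARPHRD_ETHER) or -EINVAL is returned, as above.
 */
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);
	rtnl_unlock();
	return err;
}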
1da177e4 5607
4bf84c35
JP
5608/**
5609 * dev_change_carrier - Change device carrier
5610 * @dev: device
691b3b7e 5611 * @new_carrier: new value
4bf84c35
JP
5612 *
5613 * Change device carrier
5614 */
5615int dev_change_carrier(struct net_device *dev, bool new_carrier)
5616{
5617 const struct net_device_ops *ops = dev->netdev_ops;
5618
5619 if (!ops->ndo_change_carrier)
5620 return -EOPNOTSUPP;
5621 if (!netif_device_present(dev))
5622 return -ENODEV;
5623 return ops->ndo_change_carrier(dev, new_carrier);
5624}
5625EXPORT_SYMBOL(dev_change_carrier);
5626
66b52b0d
JP
5627/**
5628 * dev_get_phys_port_id - Get device physical port ID
5629 * @dev: device
5630 * @ppid: port ID
5631 *
5632 * Get device physical port ID
5633 */
5634int dev_get_phys_port_id(struct net_device *dev,
5635 struct netdev_phys_port_id *ppid)
5636{
5637 const struct net_device_ops *ops = dev->netdev_ops;
5638
5639 if (!ops->ndo_get_phys_port_id)
5640 return -EOPNOTSUPP;
5641 return ops->ndo_get_phys_port_id(dev, ppid);
5642}
5643EXPORT_SYMBOL(dev_get_phys_port_id);
5644
1da177e4
LT
5645/**
5646 * dev_new_index - allocate an ifindex
c4ea43c5 5647 * @net: the applicable net namespace
1da177e4
LT
5648 *
5649 * Returns a suitable unique value for a new device interface
5650 * number. The caller must hold the rtnl semaphore or the
5651 * dev_base_lock to be sure it remains unique.
5652 */
881d966b 5653static int dev_new_index(struct net *net)
1da177e4 5654{
aa79e66e 5655 int ifindex = net->ifindex;
1da177e4
LT
5656 for (;;) {
5657 if (++ifindex <= 0)
5658 ifindex = 1;
881d966b 5659 if (!__dev_get_by_index(net, ifindex))
aa79e66e 5660 return net->ifindex = ifindex;
1da177e4
LT
5661 }
5662}
5663
1da177e4 5664/* Delayed registration/unregisteration */
3b5b34fd 5665static LIST_HEAD(net_todo_list);
200b916f 5666DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
1da177e4 5667
6f05f629 5668static void net_set_todo(struct net_device *dev)
1da177e4 5669{
1da177e4 5670 list_add_tail(&dev->todo_list, &net_todo_list);
50624c93 5671 dev_net(dev)->dev_unreg_count++;
1da177e4
LT
5672}
5673
9b5e383c 5674static void rollback_registered_many(struct list_head *head)
93ee31f1 5675{
e93737b0 5676 struct net_device *dev, *tmp;
5cde2829 5677 LIST_HEAD(close_head);
9b5e383c 5678
93ee31f1
DL
5679 BUG_ON(dev_boot_phase);
5680 ASSERT_RTNL();
5681
e93737b0 5682 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
9b5e383c 5683 /* Some devices call this without ever having been
e93737b0
KK
5684 * registered, as part of initialization unwind. Remove those
5685 * devices and proceed with the remaining.
9b5e383c
ED
5686 */
5687 if (dev->reg_state == NETREG_UNINITIALIZED) {
7b6cd1ce
JP
5688 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5689 dev->name, dev);
93ee31f1 5690
9b5e383c 5691 WARN_ON(1);
e93737b0
KK
5692 list_del(&dev->unreg_list);
5693 continue;
9b5e383c 5694 }
449f4544 5695 dev->dismantle = true;
9b5e383c 5696 BUG_ON(dev->reg_state != NETREG_REGISTERED);
44345724 5697 }
93ee31f1 5698
44345724 5699 /* If device is running, close it first. */
5cde2829
EB
5700 list_for_each_entry(dev, head, unreg_list)
5701 list_add_tail(&dev->close_list, &close_head);
5702 dev_close_many(&close_head);
93ee31f1 5703
44345724 5704 list_for_each_entry(dev, head, unreg_list) {
9b5e383c
ED
5705 /* And unlink it from device chain. */
5706 unlist_netdevice(dev);
93ee31f1 5707
9b5e383c
ED
5708 dev->reg_state = NETREG_UNREGISTERING;
5709 }
93ee31f1
DL
5710
5711 synchronize_net();
5712
9b5e383c
ED
5713 list_for_each_entry(dev, head, unreg_list) {
5714 /* Shutdown queueing discipline. */
5715 dev_shutdown(dev);
93ee31f1
DL
5716
5717
9b5e383c
ED
5718 /* Notify protocols that we are about to destroy
5719 this device. They should clean up all of their state.
5720 */
5721 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
93ee31f1 5722
9b5e383c
ED
5723 /*
5724 * Flush the unicast and multicast chains
5725 */
a748ee24 5726 dev_uc_flush(dev);
22bedad3 5727 dev_mc_flush(dev);
93ee31f1 5728
9b5e383c
ED
5729 if (dev->netdev_ops->ndo_uninit)
5730 dev->netdev_ops->ndo_uninit(dev);
93ee31f1 5731
56bfa7ee
RP
5732 if (!dev->rtnl_link_ops ||
5733 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5734 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
5735
9ff162a8
JP
5736 /* Notifier chain MUST detach all our upper devices. */
5737 WARN_ON(netdev_has_any_upper_dev(dev));
93ee31f1 5738
9b5e383c
ED
5739 /* Remove entries from kobject tree */
5740 netdev_unregister_kobject(dev);
024e9679
AD
5741#ifdef CONFIG_XPS
5742 /* Remove XPS queueing entries */
5743 netif_reset_xps_queues_gt(dev, 0);
5744#endif
9b5e383c 5745 }
93ee31f1 5746
850a545b 5747 synchronize_net();
395264d5 5748
a5ee1551 5749 list_for_each_entry(dev, head, unreg_list)
9b5e383c
ED
5750 dev_put(dev);
5751}
5752
5753static void rollback_registered(struct net_device *dev)
5754{
5755 LIST_HEAD(single);
5756
5757 list_add(&dev->unreg_list, &single);
5758 rollback_registered_many(&single);
ceaaec98 5759 list_del(&single);
93ee31f1
DL
5760}
5761
c8f44aff
MM
5762static netdev_features_t netdev_fix_features(struct net_device *dev,
5763 netdev_features_t features)
b63365a2 5764{
57422dc5
MM
5765 /* Fix illegal checksum combinations */
5766 if ((features & NETIF_F_HW_CSUM) &&
5767 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 5768 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
57422dc5
MM
5769 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5770 }
5771
b63365a2 5772 /* TSO requires that SG is present as well. */
ea2d3688 5773 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6f404e44 5774 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
ea2d3688 5775 features &= ~NETIF_F_ALL_TSO;
b63365a2
HX
5776 }
5777
ec5f0615
PS
5778 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5779 !(features & NETIF_F_IP_CSUM)) {
5780 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5781 features &= ~NETIF_F_TSO;
5782 features &= ~NETIF_F_TSO_ECN;
5783 }
5784
5785 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5786 !(features & NETIF_F_IPV6_CSUM)) {
5787 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5788 features &= ~NETIF_F_TSO6;
5789 }
5790
31d8b9e0
BH
5791 /* TSO ECN requires that TSO is present as well. */
5792 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5793 features &= ~NETIF_F_TSO_ECN;
5794
212b573f
MM
5795 /* Software GSO depends on SG. */
5796 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6f404e44 5797 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
212b573f
MM
5798 features &= ~NETIF_F_GSO;
5799 }
5800
acd1130e 5801 /* UFO needs SG and checksumming */
b63365a2 5802 if (features & NETIF_F_UFO) {
79032644
MM
5803 /* maybe split UFO into V4 and V6? */
5804 if (!((features & NETIF_F_GEN_CSUM) ||
5805 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5806 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6f404e44 5807 netdev_dbg(dev,
acd1130e 5808 "Dropping NETIF_F_UFO since no checksum offload features.\n");
b63365a2
HX
5809 features &= ~NETIF_F_UFO;
5810 }
5811
5812 if (!(features & NETIF_F_SG)) {
6f404e44 5813 netdev_dbg(dev,
acd1130e 5814 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
b63365a2
HX
5815 features &= ~NETIF_F_UFO;
5816 }
5817 }
5818
d0290214
JP
5819#ifdef CONFIG_NET_RX_BUSY_POLL
5820 if (dev->netdev_ops->ndo_busy_poll)
5821 features |= NETIF_F_BUSY_POLL;
5822 else
5823#endif
5824 features &= ~NETIF_F_BUSY_POLL;
5825
b63365a2
HX
5826 return features;
5827}
b63365a2 5828
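/* Editor's example (a sketch, not part of dev.c): a driver-side
 * ndo_fix_features hook enforcing the same style of dependency for
 * hypothetical hardware that can only do TSO when IP checksum offload
 * is also enabled.
 */
static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	if ((features & NETIF_F_TSO) && !(features & NETIF_F_IP_CSUM)) {
		netdev_dbg(dev, "Dropping TSO: hw needs IP csum offload\n");
		features &= ~NETIF_F_TSO;
	}
	return features;
}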
6cb6a27c 5829int __netdev_update_features(struct net_device *dev)
5455c699 5830{
c8f44aff 5831 netdev_features_t features;
5455c699
MM
5832 int err = 0;
5833
87267485
MM
5834 ASSERT_RTNL();
5835
5455c699
MM
5836 features = netdev_get_wanted_features(dev);
5837
5838 if (dev->netdev_ops->ndo_fix_features)
5839 features = dev->netdev_ops->ndo_fix_features(dev, features);
5840
5841 /* driver might be less strict about feature dependencies */
5842 features = netdev_fix_features(dev, features);
5843
5844 if (dev->features == features)
6cb6a27c 5845 return 0;
5455c699 5846
c8f44aff
MM
5847 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5848 &dev->features, &features);
5455c699
MM
5849
5850 if (dev->netdev_ops->ndo_set_features)
5851 err = dev->netdev_ops->ndo_set_features(dev, features);
5852
6cb6a27c 5853 if (unlikely(err < 0)) {
5455c699 5854 netdev_err(dev,
c8f44aff
MM
5855 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5856 err, &features, &dev->features);
6cb6a27c
MM
5857 return -1;
5858 }
5859
5860 if (!err)
5861 dev->features = features;
5862
5863 return 1;
5864}
5865
afe12cc8
MM
5866/**
5867 * netdev_update_features - recalculate device features
5868 * @dev: the device to check
5869 *
5870 * Recalculate dev->features set and send notifications if it
5871 * has changed. Should be called after driver or hardware dependent
5872 * conditions might have changed that influence the features.
5873 */
6cb6a27c
MM
5874void netdev_update_features(struct net_device *dev)
5875{
5876 if (__netdev_update_features(dev))
5877 netdev_features_change(dev);
5455c699
MM
5878}
5879EXPORT_SYMBOL(netdev_update_features);
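/* Editor's example (a sketch, not part of dev.c): a driver whose
 * ndo_fix_features answer depends on internal state re-evaluates the
 * feature set when that state changes; RTNL is already held in such
 * paths, as __netdev_update_features() asserts.
 */
static void example_offload_state_changed(struct net_device *dev)
{
	ASSERT_RTNL();
	netdev_update_features(dev);	/* re-runs the fix_features chain */
}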
5880
afe12cc8
MM
5881/**
5882 * netdev_change_features - recalculate device features
5883 * @dev: the device to check
5884 *
5885 * Recalculate dev->features set and send notifications even
5886 * if they have not changed. Should be called instead of
5887 * netdev_update_features() if also dev->vlan_features might
5888 * have changed to allow the changes to be propagated to stacked
5889 * VLAN devices.
5890 */
5891void netdev_change_features(struct net_device *dev)
5892{
5893 __netdev_update_features(dev);
5894 netdev_features_change(dev);
5895}
5896EXPORT_SYMBOL(netdev_change_features);
5897
fc4a7489
PM
5898/**
5899 * netif_stacked_transfer_operstate - transfer operstate
5900 * @rootdev: the root or lower level device to transfer state from
5901 * @dev: the device to transfer operstate to
5902 *
5903 * Transfer operational state from root to device. This is normally
5904 * called when a stacking relationship exists between the root
5905 * device and the device (a leaf device).
5906 */
5907void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5908 struct net_device *dev)
5909{
5910 if (rootdev->operstate == IF_OPER_DORMANT)
5911 netif_dormant_on(dev);
5912 else
5913 netif_dormant_off(dev);
5914
5915 if (netif_carrier_ok(rootdev)) {
5916 if (!netif_carrier_ok(dev))
5917 netif_carrier_on(dev);
5918 } else {
5919 if (netif_carrier_ok(dev))
5920 netif_carrier_off(dev);
5921 }
5922}
5923EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5924
a953be53 5925#ifdef CONFIG_SYSFS
1b4bf461
ED
5926static int netif_alloc_rx_queues(struct net_device *dev)
5927{
1b4bf461 5928 unsigned int i, count = dev->num_rx_queues;
bd25fa7b 5929 struct netdev_rx_queue *rx;
1b4bf461 5930
bd25fa7b 5931 BUG_ON(count < 1);
1b4bf461 5932
bd25fa7b 5933 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
62b5942a 5934 if (!rx)
bd25fa7b 5935 return -ENOMEM;
62b5942a 5936
bd25fa7b
TH
5937 dev->_rx = rx;
5938
bd25fa7b 5939 for (i = 0; i < count; i++)
fe822240 5940 rx[i].dev = dev;
1b4bf461
ED
5941 return 0;
5942}
bf264145 5943#endif
1b4bf461 5944
aa942104
CG
5945static void netdev_init_one_queue(struct net_device *dev,
5946 struct netdev_queue *queue, void *_unused)
5947{
5948 /* Initialize queue lock */
5949 spin_lock_init(&queue->_xmit_lock);
5950 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5951 queue->xmit_lock_owner = -1;
b236da69 5952 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
aa942104 5953 queue->dev = dev;
114cf580
TH
5954#ifdef CONFIG_BQL
5955 dql_init(&queue->dql, HZ);
5956#endif
aa942104
CG
5957}
5958
60877a32
ED
5959static void netif_free_tx_queues(struct net_device *dev)
5960{
4cb28970 5961 kvfree(dev->_tx);
60877a32
ED
5962}
5963
e6484930
TH
5964static int netif_alloc_netdev_queues(struct net_device *dev)
5965{
5966 unsigned int count = dev->num_tx_queues;
5967 struct netdev_queue *tx;
60877a32 5968 size_t sz = count * sizeof(*tx);
e6484930 5969
60877a32 5970 BUG_ON(count < 1 || count > 0xffff);
62b5942a 5971
60877a32
ED
5972 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
5973 if (!tx) {
5974 tx = vzalloc(sz);
5975 if (!tx)
5976 return -ENOMEM;
5977 }
e6484930 5978 dev->_tx = tx;
1d24eb48 5979
e6484930
TH
5980 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5981 spin_lock_init(&dev->tx_global_lock);
aa942104
CG
5982
5983 return 0;
e6484930
TH
5984}
5985
1da177e4
LT
5986/**
5987 * register_netdevice - register a network device
5988 * @dev: device to register
5989 *
5990 * Take a completed network device structure and add it to the kernel
5991 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5992 * chain. 0 is returned on success. A negative errno code is returned
5993 * on a failure to set up the device, or if the name is a duplicate.
5994 *
5995 * Callers must hold the rtnl semaphore. You may want
5996 * register_netdev() instead of this.
5997 *
5998 * BUGS:
5999 * The locking appears insufficient to guarantee two parallel registers
6000 * will not get the same name.
6001 */
6002
6003int register_netdevice(struct net_device *dev)
6004{
1da177e4 6005 int ret;
d314774c 6006 struct net *net = dev_net(dev);
1da177e4
LT
6007
6008 BUG_ON(dev_boot_phase);
6009 ASSERT_RTNL();
6010
b17a7c17
SH
6011 might_sleep();
6012
1da177e4
LT
6013 /* When net_device's are persistent, this will be fatal. */
6014 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
d314774c 6015 BUG_ON(!net);
1da177e4 6016
f1f28aa3 6017 spin_lock_init(&dev->addr_list_lock);
cf508b12 6018 netdev_set_addr_lockdep_class(dev);
1da177e4 6019
1da177e4
LT
6020 dev->iflink = -1;
6021
828de4f6 6022 ret = dev_get_valid_name(net, dev, dev->name);
0696c3a8
PP
6023 if (ret < 0)
6024 goto out;
6025
1da177e4 6026 /* Init, if this function is available */
d314774c
SH
6027 if (dev->netdev_ops->ndo_init) {
6028 ret = dev->netdev_ops->ndo_init(dev);
1da177e4
LT
6029 if (ret) {
6030 if (ret > 0)
6031 ret = -EIO;
90833aa4 6032 goto out;
1da177e4
LT
6033 }
6034 }
4ec93edb 6035
f646968f
PM
6036 if (((dev->hw_features | dev->features) &
6037 NETIF_F_HW_VLAN_CTAG_FILTER) &&
d2ed273d
MM
6038 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6039 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6040 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6041 ret = -EINVAL;
6042 goto err_uninit;
6043 }
6044
9c7dafbf
PE
6045 ret = -EBUSY;
6046 if (!dev->ifindex)
6047 dev->ifindex = dev_new_index(net);
6048 else if (__dev_get_by_index(net, dev->ifindex))
6049 goto err_uninit;
6050
1da177e4
LT
6051 if (dev->iflink == -1)
6052 dev->iflink = dev->ifindex;
6053
5455c699
MM
6054 /* Transfer changeable features to wanted_features and enable
6055 * software offloads (GSO and GRO).
6056 */
6057 dev->hw_features |= NETIF_F_SOFT_FEATURES;
14d1232f
MM
6058 dev->features |= NETIF_F_SOFT_FEATURES;
6059 dev->wanted_features = dev->features & dev->hw_features;
1da177e4 6060
34324dc2
MM
6061 if (!(dev->flags & IFF_LOOPBACK)) {
6062 dev->hw_features |= NETIF_F_NOCACHE_COPY;
c6e1a0d1
TH
6063 }
6064
1180e7d6 6065 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
16c3ea78 6066 */
1180e7d6 6067 dev->vlan_features |= NETIF_F_HIGHDMA;
16c3ea78 6068
ee579677
PS
6069 /* Make NETIF_F_SG inheritable to tunnel devices.
6070 */
6071 dev->hw_enc_features |= NETIF_F_SG;
6072
0d89d203
SH
6073 /* Make NETIF_F_SG inheritable to MPLS.
6074 */
6075 dev->mpls_features |= NETIF_F_SG;
6076
7ffbe3fd
JB
6077 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6078 ret = notifier_to_errno(ret);
6079 if (ret)
6080 goto err_uninit;
6081
8b41d188 6082 ret = netdev_register_kobject(dev);
b17a7c17 6083 if (ret)
7ce1b0ed 6084 goto err_uninit;
b17a7c17
SH
6085 dev->reg_state = NETREG_REGISTERED;
6086
6cb6a27c 6087 __netdev_update_features(dev);
8e9b59b2 6088
1da177e4
LT
6089 /*
6090 * Default initial state at registry is that the
6091 * device is present.
6092 */
6093
6094 set_bit(__LINK_STATE_PRESENT, &dev->state);
6095
8f4cccbb
BH
6096 linkwatch_init_dev(dev);
6097
1da177e4 6098 dev_init_scheduler(dev);
1da177e4 6099 dev_hold(dev);
ce286d32 6100 list_netdevice(dev);
7bf23575 6101 add_device_randomness(dev->dev_addr, dev->addr_len);
1da177e4 6102
948b337e
JP
6103 /* If the device has permanent device address, driver should
6104 * set dev_addr and also addr_assign_type should be set to
6105 * NET_ADDR_PERM (default value).
6106 */
6107 if (dev->addr_assign_type == NET_ADDR_PERM)
6108 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6109
1da177e4 6110 /* Notify protocols, that a new device appeared. */
056925ab 6111 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
fcc5a03a 6112 ret = notifier_to_errno(ret);
93ee31f1
DL
6113 if (ret) {
6114 rollback_registered(dev);
6115 dev->reg_state = NETREG_UNREGISTERED;
6116 }
d90a909e
EB
6117 /*
6118 * Prevent userspace races by waiting until the network
6119 * device is fully setup before sending notifications.
6120 */
a2835763
PM
6121 if (!dev->rtnl_link_ops ||
6122 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
7f294054 6123 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
1da177e4
LT
6124
6125out:
6126 return ret;
7ce1b0ed
HX
6127
6128err_uninit:
d314774c
SH
6129 if (dev->netdev_ops->ndo_uninit)
6130 dev->netdev_ops->ndo_uninit(dev);
7ce1b0ed 6131 goto out;
1da177e4 6132}
d1b19dff 6133EXPORT_SYMBOL(register_netdevice);
1da177e4 6134
937f1ba5
BH
6135/**
6136 * init_dummy_netdev - init a dummy network device for NAPI
6137 * @dev: device to init
6138 *
6139 * This takes a network device structure and initializes the minimum
6140 * set of fields so it can be used to schedule NAPI polls without
6141 * registering a full blown interface. This is to be used by drivers
6142 * that need to tie several hardware interfaces to a single NAPI
6143 * poll scheduler due to HW limitations.
6144 */
6145int init_dummy_netdev(struct net_device *dev)
6146{
6147 /* Clear everything. Note we don't initialize spinlocks
6148 * as they aren't supposed to be taken by any of the
6149 * NAPI code and this dummy netdev is supposed to be
6150 * only ever used for NAPI polls
6151 */
6152 memset(dev, 0, sizeof(struct net_device));
6153
6154 /* make sure we BUG if trying to hit standard
6155 * register/unregister code path
6156 */
6157 dev->reg_state = NETREG_DUMMY;
6158
937f1ba5
BH
6159 /* NAPI wants this */
6160 INIT_LIST_HEAD(&dev->napi_list);
6161
6162 /* a dummy interface is started by default */
6163 set_bit(__LINK_STATE_PRESENT, &dev->state);
6164 set_bit(__LINK_STATE_START, &dev->state);
6165
29b4433d
ED
6166 /* Note: We don't allocate pcpu_refcnt for dummy devices,
6167 * because users of this 'device' don't need to change
6168 * its refcount.
6169 */
6170
937f1ba5
BH
6171 return 0;
6172}
6173EXPORT_SYMBOL_GPL(init_dummy_netdev);
6174
6175
1da177e4
LT
6176/**
6177 * register_netdev - register a network device
6178 * @dev: device to register
6179 *
6180 * Take a completed network device structure and add it to the kernel
6181 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6182 * chain. 0 is returned on success. A negative errno code is returned
6183 * on a failure to set up the device, or if the name is a duplicate.
6184 *
38b4da38 6185 * This is a wrapper around register_netdevice that takes the rtnl semaphore
1da177e4
LT
6186 * and expands the device name if you passed a format string to
6187 * alloc_netdev.
6188 */
6189int register_netdev(struct net_device *dev)
6190{
6191 int err;
6192
6193 rtnl_lock();
1da177e4 6194 err = register_netdevice(dev);
1da177e4
LT
6195 rtnl_unlock();
6196 return err;
6197}
6198EXPORT_SYMBOL(register_netdev);
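/* Editor's example (a sketch, not part of dev.c): the usual module-init
 * pairing of alloc_netdev()/register_netdev() with its error paths. All
 * "example" names are invented, and a real setup() callback must also
 * assign dev->netdev_ops before registration.
 */
static struct net_device *example_dev;

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* a real driver would set dev->netdev_ops here */
}

static int __init example_module_init(void)
{
	int err;

	example_dev = alloc_netdev(0, "exm%d", example_setup);
	if (!example_dev)
		return -ENOMEM;

	err = register_netdev(example_dev);	/* takes rtnl itself */
	if (err)
		free_netdev(example_dev);
	return err;
}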
6199
29b4433d
ED
6200int netdev_refcnt_read(const struct net_device *dev)
6201{
6202 int i, refcnt = 0;
6203
6204 for_each_possible_cpu(i)
6205 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6206 return refcnt;
6207}
6208EXPORT_SYMBOL(netdev_refcnt_read);
6209
2c53040f 6210/**
1da177e4 6211 * netdev_wait_allrefs - wait until all references are gone.
3de7a37b 6212 * @dev: target net_device
1da177e4
LT
6213 *
6214 * This is called when unregistering network devices.
6215 *
6216 * Any protocol or device that holds a reference should register
6217 * for netdevice notification, and clean up and put back the
6218 * reference if they receive an UNREGISTER event.
6219 * We can get stuck here if buggy protocols don't correctly
4ec93edb 6220 * call dev_put.
1da177e4
LT
6221 */
6222static void netdev_wait_allrefs(struct net_device *dev)
6223{
6224 unsigned long rebroadcast_time, warning_time;
29b4433d 6225 int refcnt;
1da177e4 6226
e014debe
ED
6227 linkwatch_forget_dev(dev);
6228
1da177e4 6229 rebroadcast_time = warning_time = jiffies;
29b4433d
ED
6230 refcnt = netdev_refcnt_read(dev);
6231
6232 while (refcnt != 0) {
1da177e4 6233 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6756ae4b 6234 rtnl_lock();
1da177e4
LT
6235
6236 /* Rebroadcast unregister notification */
056925ab 6237 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
1da177e4 6238
748e2d93 6239 __rtnl_unlock();
0115e8e3 6240 rcu_barrier();
748e2d93
ED
6241 rtnl_lock();
6242
0115e8e3 6243 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
1da177e4
LT
6244 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6245 &dev->state)) {
6246 /* We must not have linkwatch events
6247 * pending on unregister. If this
6248 * happens, we simply run the queue
6249 * unscheduled, resulting in a noop
6250 * for this device.
6251 */
6252 linkwatch_run_queue();
6253 }
6254
6756ae4b 6255 __rtnl_unlock();
1da177e4
LT
6256
6257 rebroadcast_time = jiffies;
6258 }
6259
6260 msleep(250);
6261
29b4433d
ED
6262 refcnt = netdev_refcnt_read(dev);
6263
1da177e4 6264 if (time_after(jiffies, warning_time + 10 * HZ)) {
7b6cd1ce
JP
6265 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6266 dev->name, refcnt);
1da177e4
LT
6267 warning_time = jiffies;
6268 }
6269 }
6270}
6271
6272/* The sequence is:
6273 *
6274 * rtnl_lock();
6275 * ...
6276 * register_netdevice(x1);
6277 * register_netdevice(x2);
6278 * ...
6279 * unregister_netdevice(y1);
6280 * unregister_netdevice(y2);
6281 * ...
6282 * rtnl_unlock();
6283 * free_netdev(y1);
6284 * free_netdev(y2);
6285 *
58ec3b4d 6286 * We are invoked by rtnl_unlock().
1da177e4 6287 * This allows us to deal with problems:
b17a7c17 6288 * 1) We can delete sysfs objects which invoke hotplug
1da177e4
LT
6289 * without deadlocking with linkwatch via keventd.
6290 * 2) Since we run with the RTNL semaphore not held, we can sleep
6291 * safely in order to wait for the netdev refcnt to drop to zero.
58ec3b4d
HX
6292 *
6293 * We must not return until all unregister events added during
6294 * the interval the lock was held have been completed.
1da177e4 6295 */
1da177e4
LT
6296void netdev_run_todo(void)
6297{
626ab0e6 6298 struct list_head list;
1da177e4 6299
1da177e4 6300 /* Snapshot list, allow later requests */
626ab0e6 6301 list_replace_init(&net_todo_list, &list);
58ec3b4d
HX
6302
6303 __rtnl_unlock();
626ab0e6 6304
0115e8e3
ED
6305
6306 /* Wait for rcu callbacks to finish before next phase */
850a545b
EB
6307 if (!list_empty(&list))
6308 rcu_barrier();
6309
1da177e4
LT
6310 while (!list_empty(&list)) {
6311 struct net_device *dev
e5e26d75 6312 = list_first_entry(&list, struct net_device, todo_list);
1da177e4
LT
6313 list_del(&dev->todo_list);
6314
748e2d93 6315 rtnl_lock();
0115e8e3 6316 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
748e2d93 6317 __rtnl_unlock();
0115e8e3 6318
b17a7c17 6319 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
7b6cd1ce 6320 pr_err("network todo '%s' but state %d\n",
b17a7c17
SH
6321 dev->name, dev->reg_state);
6322 dump_stack();
6323 continue;
6324 }
1da177e4 6325
b17a7c17 6326 dev->reg_state = NETREG_UNREGISTERED;
1da177e4 6327
152102c7 6328 on_each_cpu(flush_backlog, dev, 1);
6e583ce5 6329
b17a7c17 6330 netdev_wait_allrefs(dev);
1da177e4 6331
b17a7c17 6332 /* paranoia */
29b4433d 6333 BUG_ON(netdev_refcnt_read(dev));
33d480ce
ED
6334 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6335 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
547b792c 6336 WARN_ON(dev->dn_ptr);
1da177e4 6337
b17a7c17
SH
6338 if (dev->destructor)
6339 dev->destructor(dev);
9093bbb2 6340
50624c93
EB
6341 /* Report a network device has been unregistered */
6342 rtnl_lock();
6343 dev_net(dev)->dev_unreg_count--;
6344 __rtnl_unlock();
6345 wake_up(&netdev_unregistering_wq);
6346
9093bbb2
SH
6347 /* Free network device */
6348 kobject_put(&dev->dev.kobj);
1da177e4 6349 }
1da177e4
LT
6350}
6351
3cfde79c
BH
6352/* Convert net_device_stats to rtnl_link_stats64. They have the same
6353 * fields in the same order, with only the type differing.
6354 */
77a1abf5
ED
6355void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6356 const struct net_device_stats *netdev_stats)
3cfde79c
BH
6357{
6358#if BITS_PER_LONG == 64
77a1abf5
ED
6359 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6360 memcpy(stats64, netdev_stats, sizeof(*stats64));
3cfde79c
BH
6361#else
6362 size_t i, n = sizeof(*stats64) / sizeof(u64);
6363 const unsigned long *src = (const unsigned long *)netdev_stats;
6364 u64 *dst = (u64 *)stats64;
6365
6366 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6367 sizeof(*stats64) / sizeof(u64));
6368 for (i = 0; i < n; i++)
6369 dst[i] = src[i];
6370#endif
6371}
77a1abf5 6372EXPORT_SYMBOL(netdev_stats_to_stats64);
3cfde79c 6373
eeda3fd6
SH
6374/**
6375 * dev_get_stats - get network device statistics
6376 * @dev: device to get statistics from
28172739 6377 * @storage: place to store stats
eeda3fd6 6378 *
d7753516
BH
6379 * Get network statistics from device. Return @storage.
6380 * The device driver may provide its own method by setting
6381 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6382 * otherwise the internal statistics structure is used.
eeda3fd6 6383 */
d7753516
BH
6384struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6385 struct rtnl_link_stats64 *storage)
7004bf25 6386{
eeda3fd6
SH
6387 const struct net_device_ops *ops = dev->netdev_ops;
6388
28172739
ED
6389 if (ops->ndo_get_stats64) {
6390 memset(storage, 0, sizeof(*storage));
caf586e5
ED
6391 ops->ndo_get_stats64(dev, storage);
6392 } else if (ops->ndo_get_stats) {
3cfde79c 6393 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
caf586e5
ED
6394 } else {
6395 netdev_stats_to_stats64(storage, &dev->stats);
28172739 6396 }
caf586e5 6397 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
015f0688 6398 storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
28172739 6399 return storage;
c45d286e 6400}
eeda3fd6 6401EXPORT_SYMBOL(dev_get_stats);
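/* Editor's example (a sketch, not part of dev.c): sampling one counter
 * through the unified 64-bit interface, regardless of which of the three
 * driver methods described above actually supplies the numbers.
 */
static u64 example_rx_packets(struct net_device *dev)
{
	struct rtnl_link_stats64 storage;

	dev_get_stats(dev, &storage);
	return storage.rx_packets;
}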
c45d286e 6402
24824a09 6403struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
dc2b4847 6404{
24824a09 6405 struct netdev_queue *queue = dev_ingress_queue(dev);
dc2b4847 6406
24824a09
ED
6407#ifdef CONFIG_NET_CLS_ACT
6408 if (queue)
6409 return queue;
6410 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6411 if (!queue)
6412 return NULL;
6413 netdev_init_one_queue(dev, queue, NULL);
24824a09
ED
6414 queue->qdisc = &noop_qdisc;
6415 queue->qdisc_sleeping = &noop_qdisc;
6416 rcu_assign_pointer(dev->ingress_queue, queue);
6417#endif
6418 return queue;
bb949fbd
DM
6419}
6420
2c60db03
ED
6421static const struct ethtool_ops default_ethtool_ops;
6422
d07d7507
SG
6423void netdev_set_default_ethtool_ops(struct net_device *dev,
6424 const struct ethtool_ops *ops)
6425{
6426 if (dev->ethtool_ops == &default_ethtool_ops)
6427 dev->ethtool_ops = ops;
6428}
6429EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6430
74d332c1
ED
6431void netdev_freemem(struct net_device *dev)
6432{
6433 char *addr = (char *)dev - dev->padded;
6434
4cb28970 6435 kvfree(addr);
74d332c1
ED
6436}
6437
1da177e4 6438/**
36909ea4 6439 * alloc_netdev_mqs - allocate network device
1da177e4
LT
6440 * @sizeof_priv: size of private data to allocate space for
6441 * @name: device name format string
6442 * @setup: callback to initialize device
36909ea4
TH
6443 * @txqs: the number of TX subqueues to allocate
6444 * @rxqs: the number of RX subqueues to allocate
1da177e4
LT
6445 *
6446 * Allocates a struct net_device with private data area for driver use
90e51adf 6447 * and performs basic initialization. Also allocates subqueue structs
36909ea4 6448 * for each queue on the device.
1da177e4 6449 */
36909ea4
TH
6450struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6451 void (*setup)(struct net_device *),
6452 unsigned int txqs, unsigned int rxqs)
1da177e4 6453{
1da177e4 6454 struct net_device *dev;
7943986c 6455 size_t alloc_size;
1ce8e7b5 6456 struct net_device *p;
1da177e4 6457
b6fe17d6
SH
6458 BUG_ON(strlen(name) >= sizeof(dev->name));
6459
36909ea4 6460 if (txqs < 1) {
7b6cd1ce 6461 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
55513fb4
TH
6462 return NULL;
6463 }
6464
a953be53 6465#ifdef CONFIG_SYSFS
36909ea4 6466 if (rxqs < 1) {
7b6cd1ce 6467 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
36909ea4
TH
6468 return NULL;
6469 }
6470#endif
6471
fd2ea0a7 6472 alloc_size = sizeof(struct net_device);
d1643d24
AD
6473 if (sizeof_priv) {
6474 /* ensure 32-byte alignment of private area */
1ce8e7b5 6475 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
d1643d24
AD
6476 alloc_size += sizeof_priv;
6477 }
6478 /* ensure 32-byte alignment of whole construct */
1ce8e7b5 6479 alloc_size += NETDEV_ALIGN - 1;
1da177e4 6480
74d332c1
ED
6481 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6482 if (!p)
6483 p = vzalloc(alloc_size);
62b5942a 6484 if (!p)
1da177e4 6485 return NULL;
1da177e4 6486
1ce8e7b5 6487 dev = PTR_ALIGN(p, NETDEV_ALIGN);
1da177e4 6488 dev->padded = (char *)dev - (char *)p;
ab9c73cc 6489
29b4433d
ED
6490 dev->pcpu_refcnt = alloc_percpu(int);
6491 if (!dev->pcpu_refcnt)
74d332c1 6492 goto free_dev;
ab9c73cc 6493
ab9c73cc 6494 if (dev_addr_init(dev))
29b4433d 6495 goto free_pcpu;
ab9c73cc 6496
22bedad3 6497 dev_mc_init(dev);
a748ee24 6498 dev_uc_init(dev);
ccffad25 6499
c346dca1 6500 dev_net_set(dev, &init_net);
1da177e4 6501
8d3bdbd5 6502 dev->gso_max_size = GSO_MAX_SIZE;
30b678d8 6503 dev->gso_max_segs = GSO_MAX_SEGS;
8d3bdbd5 6504
8d3bdbd5
DM
6505 INIT_LIST_HEAD(&dev->napi_list);
6506 INIT_LIST_HEAD(&dev->unreg_list);
5cde2829 6507 INIT_LIST_HEAD(&dev->close_list);
8d3bdbd5 6508 INIT_LIST_HEAD(&dev->link_watch_list);
2f268f12
VF
6509 INIT_LIST_HEAD(&dev->adj_list.upper);
6510 INIT_LIST_HEAD(&dev->adj_list.lower);
6511 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6512 INIT_LIST_HEAD(&dev->all_adj_list.lower);
8d3bdbd5
DM
6513 dev->priv_flags = IFF_XMIT_DST_RELEASE;
6514 setup(dev);
6515
36909ea4
TH
6516 dev->num_tx_queues = txqs;
6517 dev->real_num_tx_queues = txqs;
ed9af2e8 6518 if (netif_alloc_netdev_queues(dev))
8d3bdbd5 6519 goto free_all;
e8a0464c 6520
a953be53 6521#ifdef CONFIG_SYSFS
36909ea4
TH
6522 dev->num_rx_queues = rxqs;
6523 dev->real_num_rx_queues = rxqs;
fe822240 6524 if (netif_alloc_rx_queues(dev))
8d3bdbd5 6525 goto free_all;
df334545 6526#endif
0a9627f2 6527
1da177e4 6528 strcpy(dev->name, name);
cbda10fa 6529 dev->group = INIT_NETDEV_GROUP;
2c60db03
ED
6530 if (!dev->ethtool_ops)
6531 dev->ethtool_ops = &default_ethtool_ops;
1da177e4 6532 return dev;
ab9c73cc 6533
8d3bdbd5
DM
6534free_all:
6535 free_netdev(dev);
6536 return NULL;
6537
29b4433d
ED
6538free_pcpu:
6539 free_percpu(dev->pcpu_refcnt);
74d332c1
ED
6540free_dev:
6541 netdev_freemem(dev);
ab9c73cc 6542 return NULL;
1da177e4 6543}
36909ea4 6544EXPORT_SYMBOL(alloc_netdev_mqs);
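/* Editor's example (a sketch, not part of dev.c): allocating a
 * hypothetical Ethernet-like device with private state and four TX/RX
 * queue pairs; alloc_etherdev_mqs() is the usual shorthand for this.
 */
struct example_priv {
	spinlock_t lock;	/* illustrative private state */
};

static struct net_device *example_alloc(void)
{
	return alloc_netdev_mqs(sizeof(struct example_priv), "exmq%d",
				ether_setup, 4, 4);
}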
1da177e4
LT
6545
6546/**
6547 * free_netdev - free network device
6548 * @dev: device
6549 *
4ec93edb
YH
6550 * This function does the last stage of destroying an allocated device
6551 * interface. The reference to the device object is released.
1da177e4
LT
6552 * If this is the last reference then it will be freed.
6553 */
6554void free_netdev(struct net_device *dev)
6555{
d565b0a1
HX
6556 struct napi_struct *p, *n;
6557
f3005d7f
DL
6558 release_net(dev_net(dev));
6559
60877a32 6560 netif_free_tx_queues(dev);
a953be53 6561#ifdef CONFIG_SYSFS
fe822240
TH
6562 kfree(dev->_rx);
6563#endif
e8a0464c 6564
33d480ce 6565 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
24824a09 6566
f001fde5
JP
6567 /* Flush device addresses */
6568 dev_addr_flush(dev);
6569
d565b0a1
HX
6570 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6571 netif_napi_del(p);
6572
29b4433d
ED
6573 free_percpu(dev->pcpu_refcnt);
6574 dev->pcpu_refcnt = NULL;
6575
3041a069 6576 /* Compatibility with error handling in drivers */
1da177e4 6577 if (dev->reg_state == NETREG_UNINITIALIZED) {
74d332c1 6578 netdev_freemem(dev);
1da177e4
LT
6579 return;
6580 }
6581
6582 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6583 dev->reg_state = NETREG_RELEASED;
6584
43cb76d9
GKH
6585 /* will free via device release */
6586 put_device(&dev->dev);
1da177e4 6587}
d1b19dff 6588EXPORT_SYMBOL(free_netdev);
4ec93edb 6589
f0db275a
SH
6590/**
6591 * synchronize_net - Synchronize with packet receive processing
6592 *
6593 * Wait for packets currently being received to be done.
6594 * Does not block later packets from starting.
6595 */
4ec93edb 6596void synchronize_net(void)
1da177e4
LT
6597{
6598 might_sleep();
be3fc413
ED
6599 if (rtnl_is_locked())
6600 synchronize_rcu_expedited();
6601 else
6602 synchronize_rcu();
1da177e4 6603}
d1b19dff 6604EXPORT_SYMBOL(synchronize_net);
1da177e4
LT
6605
6606/**
44a0873d 6607 * unregister_netdevice_queue - remove device from the kernel
1da177e4 6608 * @dev: device
44a0873d 6609 * @head: list
6ebfbc06 6610 *
1da177e4 6611 * This function shuts down a device interface and removes it
d59b54b1 6612 * from the kernel tables.
44a0873d 6613 * If @head is not NULL, the device is queued to be unregistered later.
1da177e4
LT
6614 *
6615 * Callers must hold the rtnl semaphore. You may want
6616 * unregister_netdev() instead of this.
6617 */
6618
44a0873d 6619void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
1da177e4 6620{
a6620712
HX
6621 ASSERT_RTNL();
6622
44a0873d 6623 if (head) {
9fdce099 6624 list_move_tail(&dev->unreg_list, head);
44a0873d
ED
6625 } else {
6626 rollback_registered(dev);
6627 /* Finish processing unregister after unlock */
6628 net_set_todo(dev);
6629 }
1da177e4 6630}
44a0873d 6631EXPORT_SYMBOL(unregister_netdevice_queue);
1da177e4 6632
9b5e383c
ED
6633/**
6634 * unregister_netdevice_many - unregister many devices
6635 * @head: list of devices
87757a91
ED
6636 *
6637 * Note: As most callers use a stack allocated list_head,
6638 * we force a list_del() to make sure the stack won't be corrupted later.
9b5e383c
ED
6639 */
6640void unregister_netdevice_many(struct list_head *head)
6641{
6642 struct net_device *dev;
6643
6644 if (!list_empty(head)) {
6645 rollback_registered_many(head);
6646 list_for_each_entry(dev, head, unreg_list)
6647 net_set_todo(dev);
87757a91 6648 list_del(head);
9b5e383c
ED
6649 }
6650}
63c8099d 6651EXPORT_SYMBOL(unregister_netdevice_many);
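/* Editor's example (a sketch, not part of dev.c): batching several
 * unregisters under a single RTNL hold, the pattern rtnl_link dellink
 * implementations use; the stack-allocated list is safe because of the
 * forced list_del() noted above.
 */
static void example_remove_all(struct net_device *devs[], int n)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}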
9b5e383c 6652
1da177e4
LT
6653/**
6654 * unregister_netdev - remove device from the kernel
6655 * @dev: device
6656 *
6657 * This function shuts down a device interface and removes it
d59b54b1 6658 * from the kernel tables.
1da177e4
LT
6659 *
6660 * This is just a wrapper for unregister_netdevice that takes
6661 * the rtnl semaphore. In general you want to use this and not
6662 * unregister_netdevice.
6663 */
6664void unregister_netdev(struct net_device *dev)
6665{
6666 rtnl_lock();
6667 unregister_netdevice(dev);
6668 rtnl_unlock();
6669}
1da177e4
LT
6670EXPORT_SYMBOL(unregister_netdev);
6671
ce286d32
EB
6672/**
6673 * dev_change_net_namespace - move device to a different network namespace
6674 * @dev: device
6675 * @net: network namespace
6676 * @pat: If not NULL name pattern to try if the current device name
6677 * is already taken in the destination network namespace.
6678 *
6679 * This function shuts down a device interface and moves it
6680 * to a new network namespace. On success 0 is returned, on
6681 * a failure a negative errno code is returned.
6682 *
6683 * Callers must hold the rtnl semaphore.
6684 */
6685
6686int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6687{
ce286d32
EB
6688 int err;
6689
6690 ASSERT_RTNL();
6691
6692 /* Don't allow namespace local devices to be moved. */
6693 err = -EINVAL;
6694 if (dev->features & NETIF_F_NETNS_LOCAL)
6695 goto out;
6696
6697 /* Ensure the device has been registered */
ce286d32
EB
6698 if (dev->reg_state != NETREG_REGISTERED)
6699 goto out;
6700
6701 /* Get out if there is nothing to do */
6702 err = 0;
878628fb 6703 if (net_eq(dev_net(dev), net))
ce286d32
EB
6704 goto out;
6705
6706 /* Pick the destination device name, and ensure
6707 * we can use it in the destination network namespace.
6708 */
6709 err = -EEXIST;
d9031024 6710 if (__dev_get_by_name(net, dev->name)) {
ce286d32
EB
6711 /* We get here if we can't use the current device name */
6712 if (!pat)
6713 goto out;
828de4f6 6714 if (dev_get_valid_name(net, dev, pat) < 0)
ce286d32
EB
6715 goto out;
6716 }
6717
6718 /*
6719 * And now a mini version of register_netdevice/unregister_netdevice.
6720 */
6721
6722 /* If device is running close it first. */
9b772652 6723 dev_close(dev);
ce286d32
EB
6724
6725 /* And unlink it from device chain */
6726 err = -ENODEV;
6727 unlist_netdevice(dev);
6728
6729 synchronize_net();
6730
6731 /* Shutdown queueing discipline. */
6732 dev_shutdown(dev);
6733
6734 /* Notify protocols that we are about to destroy
6735 this device. They should clean up all of their state.
3b27e105
DL
6736
6737 Note that dev->reg_state stays at NETREG_REGISTERED.
6738 This is wanted because this way 8021q and macvlan know
6739 the device is just moving and can keep their slaves up.
ce286d32
EB
6740 */
6741 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6549dd43
G
6742 rcu_barrier();
6743 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7f294054 6744 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
ce286d32
EB
6745
6746 /*
6747 * Flush the unicast and multicast chains
6748 */
a748ee24 6749 dev_uc_flush(dev);
22bedad3 6750 dev_mc_flush(dev);
ce286d32 6751
4e66ae2e
SH
6752 /* Send a netdev-removed uevent to the old namespace */
6753 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6754
ce286d32 6755 /* Actually switch the network namespace */
c346dca1 6756 dev_net_set(dev, net);
ce286d32 6757
ce286d32
EB
6758 /* If there is an ifindex conflict assign a new one */
6759 if (__dev_get_by_index(net, dev->ifindex)) {
6760 int iflink = (dev->iflink == dev->ifindex);
6761 dev->ifindex = dev_new_index(net);
6762 if (iflink)
6763 dev->iflink = dev->ifindex;
6764 }
6765
4e66ae2e
SH
6766 /* Send a netdev-add uevent to the new namespace */
6767 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6768
8b41d188 6769 /* Fixup kobjects */
a1b3f594 6770 err = device_rename(&dev->dev, dev->name);
8b41d188 6771 WARN_ON(err);
ce286d32
EB
6772
6773 /* Add the device back in the hashes */
6774 list_netdevice(dev);
6775
6776 /* Notify protocols, that a new device appeared. */
6777 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6778
d90a909e
EB
6779 /*
6780 * Prevent userspace races by waiting until the network
6781 * device is fully setup before sending notifications.
6782 */
7f294054 6783 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
d90a909e 6784
ce286d32
EB
6785 synchronize_net();
6786 err = 0;
6787out:
6788 return err;
6789}
463d0183 6790EXPORT_SYMBOL_GPL(dev_change_net_namespace);
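/* Editor's example (a sketch, not part of dev.c): moving a device into
 * another namespace with a fallback name pattern for collisions; the
 * caller is assumed to own references on both dev and net.
 */
static int example_move_dev(struct net_device *dev, struct net *net)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, net, "moved%d");
	rtnl_unlock();
	return err;
}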
ce286d32 6791
1da177e4
LT
6792static int dev_cpu_callback(struct notifier_block *nfb,
6793 unsigned long action,
6794 void *ocpu)
6795{
6796 struct sk_buff **list_skb;
1da177e4
LT
6797 struct sk_buff *skb;
6798 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6799 struct softnet_data *sd, *oldsd;
6800
8bb78442 6801 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
1da177e4
LT
6802 return NOTIFY_OK;
6803
6804 local_irq_disable();
6805 cpu = smp_processor_id();
6806 sd = &per_cpu(softnet_data, cpu);
6807 oldsd = &per_cpu(softnet_data, oldcpu);
6808
6809 /* Find end of our completion_queue. */
6810 list_skb = &sd->completion_queue;
6811 while (*list_skb)
6812 list_skb = &(*list_skb)->next;
6813 /* Append completion queue from offline CPU. */
6814 *list_skb = oldsd->completion_queue;
6815 oldsd->completion_queue = NULL;
6816
1da177e4 6817 /* Append output queue from offline CPU. */
a9cbd588
CG
6818 if (oldsd->output_queue) {
6819 *sd->output_queue_tailp = oldsd->output_queue;
6820 sd->output_queue_tailp = oldsd->output_queue_tailp;
6821 oldsd->output_queue = NULL;
6822 oldsd->output_queue_tailp = &oldsd->output_queue;
6823 }
264524d5
HC
6824 /* Append NAPI poll list from offline CPU. */
6825 if (!list_empty(&oldsd->poll_list)) {
6826 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6827 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6828 }
1da177e4
LT
6829
6830 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6831 local_irq_enable();
6832
6833 /* Process offline CPU's input_pkt_queue */
76cc8b13 6834 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
ae78dbfa 6835 netif_rx_internal(skb);
76cc8b13 6836 input_queue_head_incr(oldsd);
fec5e652 6837 }
76cc8b13 6838 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
ae78dbfa 6839 netif_rx_internal(skb);
76cc8b13
TH
6840 input_queue_head_incr(oldsd);
6841 }
1da177e4
LT
6842
6843 return NOTIFY_OK;
6844}
1da177e4
LT
6845
6846
7f353bf2 6847/**
b63365a2
HX
6848 * netdev_increment_features - increment feature set by one
6849 * @all: current feature set
6850 * @one: new feature set
6851 * @mask: mask feature set
7f353bf2
HX
6852 *
6853 * Computes a new feature set after adding a device with feature set
b63365a2
HX
6854 * @one to the master device with current feature set @all. Will not
6855 * enable anything that is off in @mask. Returns the new feature set.
7f353bf2 6856 */
c8f44aff
MM
6857netdev_features_t netdev_increment_features(netdev_features_t all,
6858 netdev_features_t one, netdev_features_t mask)
b63365a2 6859{
1742f183
MM
6860 if (mask & NETIF_F_GEN_CSUM)
6861 mask |= NETIF_F_ALL_CSUM;
6862 mask |= NETIF_F_VLAN_CHALLENGED;
7f353bf2 6863
1742f183
MM
6864 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6865 all &= one | ~NETIF_F_ALL_FOR_ALL;
c6e1a0d1 6866
1742f183
MM
6867 /* If one device supports hw checksumming, set for all. */
6868 if (all & NETIF_F_GEN_CSUM)
6869 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7f353bf2
HX
6870
6871 return all;
6872}
b63365a2 6873EXPORT_SYMBOL(netdev_increment_features);
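/* Editor's example (a sketch, not part of dev.c): folding slave feature
 * sets into a master's, in the spirit of what bonding does; the starting
 * value and the caller-chosen mask are illustrative, not prescriptive.
 */
static netdev_features_t example_master_features(struct net_device *slaves[],
						 int n,
						 netdev_features_t mask)
{
	netdev_features_t features = NETIF_F_ALL_FOR_ALL;
	int i;

	for (i = 0; i < n; i++)
		features = netdev_increment_features(features,
						     slaves[i]->features,
						     mask);
	return features;
}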
7f353bf2 6874
430f03cd 6875static struct hlist_head * __net_init netdev_create_hash(void)
30d97d35
PE
6876{
6877 int i;
6878 struct hlist_head *hash;
6879
6880 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6881 if (hash != NULL)
6882 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6883 INIT_HLIST_HEAD(&hash[i]);
6884
6885 return hash;
6886}
6887
881d966b 6888/* Initialize per network namespace state */
4665079c 6889static int __net_init netdev_init(struct net *net)
881d966b 6890{
734b6541
RM
6891 if (net != &init_net)
6892 INIT_LIST_HEAD(&net->dev_base_head);
881d966b 6893
30d97d35
PE
6894 net->dev_name_head = netdev_create_hash();
6895 if (net->dev_name_head == NULL)
6896 goto err_name;
881d966b 6897
30d97d35
PE
6898 net->dev_index_head = netdev_create_hash();
6899 if (net->dev_index_head == NULL)
6900 goto err_idx;
881d966b
EB
6901
6902 return 0;
30d97d35
PE
6903
6904err_idx:
6905 kfree(net->dev_name_head);
6906err_name:
6907 return -ENOMEM;
881d966b
EB
6908}
6909
f0db275a
SH
6910/**
6911 * netdev_drivername - network driver for the device
6912 * @dev: network device
f0db275a
SH
6913 *
6914 * Determine network driver for device.
6915 */
3019de12 6916const char *netdev_drivername(const struct net_device *dev)
6579e57b 6917{
cf04a4c7
SH
6918 const struct device_driver *driver;
6919 const struct device *parent;
3019de12 6920 const char *empty = "";
6579e57b
AV
6921
6922 parent = dev->dev.parent;
6579e57b 6923 if (!parent)
3019de12 6924 return empty;
6579e57b
AV
6925
6926 driver = parent->driver;
6927 if (driver && driver->name)
3019de12
DM
6928 return driver->name;
6929 return empty;
6579e57b
AV
6930}
6931
b004ff49 6932static int __netdev_printk(const char *level, const struct net_device *dev,
256df2f3
JP
6933 struct va_format *vaf)
6934{
6935 int r;
6936
b004ff49 6937 if (dev && dev->dev.parent) {
666f355f
JP
6938 r = dev_printk_emit(level[1] - '0',
6939 dev->dev.parent,
6940 "%s %s %s: %pV",
6941 dev_driver_string(dev->dev.parent),
6942 dev_name(dev->dev.parent),
6943 netdev_name(dev), vaf);
b004ff49 6944 } else if (dev) {
256df2f3 6945 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
b004ff49 6946 } else {
256df2f3 6947 r = printk("%s(NULL net_device): %pV", level, vaf);
b004ff49 6948 }
256df2f3
JP
6949
6950 return r;
6951}
6952
6953int netdev_printk(const char *level, const struct net_device *dev,
6954 const char *format, ...)
6955{
6956 struct va_format vaf;
6957 va_list args;
6958 int r;
6959
6960 va_start(args, format);
6961
6962 vaf.fmt = format;
6963 vaf.va = &args;
6964
6965 r = __netdev_printk(level, dev, &vaf);
b004ff49 6966
256df2f3
JP
6967 va_end(args);
6968
6969 return r;
6970}
6971EXPORT_SYMBOL(netdev_printk);
6972
6973#define define_netdev_printk_level(func, level) \
6974int func(const struct net_device *dev, const char *fmt, ...) \
6975{ \
6976 int r; \
6977 struct va_format vaf; \
6978 va_list args; \
6979 \
6980 va_start(args, fmt); \
6981 \
6982 vaf.fmt = fmt; \
6983 vaf.va = &args; \
6984 \
6985 r = __netdev_printk(level, dev, &vaf); \
b004ff49 6986 \
256df2f3
JP
6987 va_end(args); \
6988 \
6989 return r; \
6990} \
6991EXPORT_SYMBOL(func);
6992
6993define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6994define_netdev_printk_level(netdev_alert, KERN_ALERT);
6995define_netdev_printk_level(netdev_crit, KERN_CRIT);
6996define_netdev_printk_level(netdev_err, KERN_ERR);
6997define_netdev_printk_level(netdev_warn, KERN_WARNING);
6998define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6999define_netdev_printk_level(netdev_info, KERN_INFO);
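/* Editor's example (a sketch, not part of dev.c): the generated helpers
 * are used like printk() but automatically prefix the driver, bus and
 * interface names.
 */
static void example_report_reset(struct net_device *dev, int err)
{
	if (err)
		netdev_err(dev, "reset failed: %d\n", err);
	else
		netdev_info(dev, "reset complete\n");
}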

static void __net_exit netdev_exit(struct net *net)
{
	kfree(net->dev_name_head);
	kfree(net->dev_index_head);
}

static struct pernet_operations __net_initdata netdev_net_ops = {
	.init = netdev_init,
	.exit = netdev_exit,
};
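
/*
 * A sketch of the same pernet_operations pattern as any other
 * subsystem would use it; all "example_*" names below are hypothetical
 * (guarded out since nothing registers them here):
 */
#if 0
static int __net_init example_net_init(struct net *net)
{
	/* allocate per-namespace state here */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* free per-namespace state here */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};
/* register_pernet_subsys(&example_net_ops) runs .init for every
 * existing and future namespace, and .exit on namespace teardown.
 */
#endif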

static void __net_exit default_device_exit(struct net *net)
{
	struct net_device *dev, *aux;
	/*
	 * Push all migratable network devices back to the
	 * initial network namespace
	 */
	rtnl_lock();
	for_each_netdev_safe(net, dev, aux) {
		int err;
		char fb_name[IFNAMSIZ];

		/* Ignore unmovable devices (e.g. loopback) */
		if (dev->features & NETIF_F_NETNS_LOCAL)
			continue;

		/* Leave virtual devices for the generic cleanup */
		if (dev->rtnl_link_ops)
			continue;

		/* Push remaining network devices to init_net */
		snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
		err = dev_change_net_namespace(dev, &init_net, fb_name);
		if (err) {
			pr_emerg("%s: failed to move %s to init_net: %d\n",
				 __func__, dev->name, err);
			BUG();
		}
	}
	rtnl_unlock();
}
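
/*
 * Devices that must never leave their namespace opt out of the
 * migration above by setting NETIF_F_NETNS_LOCAL in their setup
 * routine, as the loopback driver does (sketch, cf. loopback_setup()):
 *
 *	dev->features |= NETIF_F_NETNS_LOCAL;
 */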

static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
{
	/* Return with the rtnl_lock held when there are no network
	 * devices unregistering in any network namespace in net_list.
	 */
	struct net *net;
	bool unregistering;
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&netdev_unregistering_wq, &wait,
				TASK_UNINTERRUPTIBLE);
		unregistering = false;
		rtnl_lock();
		list_for_each_entry(net, net_list, exit_list) {
			if (net->dev_unreg_count > 0) {
				unregistering = true;
				break;
			}
		}
		if (!unregistering)
			break;
		__rtnl_unlock();
		schedule();
	}
	finish_wait(&netdev_unregistering_wq, &wait);
}
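
/*
 * This is the classic wait-queue idiom: prepare_to_wait() publishes
 * the sleeper before the condition is re-checked, so a wake-up arriving
 * between the check and schedule() is not lost.  A generic sketch,
 * where wq and condition are placeholders:
 *
 *	for (;;) {
 *		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		... drop any locks the waker needs ...
 *		schedule();
 *	}
 *	finish_wait(&wq, &wait);
 *
 * Here the rtnl lock is what the waker needs, hence the
 * __rtnl_unlock() before sleeping.
 */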

static void __net_exit default_device_exit_batch(struct list_head *net_list)
{
	/* At exit all network devices must be removed from a network
	 * namespace.  Do this in the reverse order of registration.
	 * Do this across as many network namespaces as possible to
	 * improve batching efficiency.
	 */
	struct net_device *dev;
	struct net *net;
	LIST_HEAD(dev_kill_list);

	/* To prevent network device cleanup code from dereferencing
	 * loopback devices or network devices that have been freed,
	 * wait here for all pending unregistrations to complete
	 * before unregistering the loopback device and allowing the
	 * network namespace to be freed.
	 *
	 * The netdev todo list containing all network device
	 * unregistrations that happen in default_device_exit_batch
	 * will run in the rtnl_unlock() at the end of
	 * default_device_exit_batch.
	 */
	rtnl_lock_unregistering(net_list);
	list_for_each_entry(net, net_list, exit_list) {
		for_each_netdev_reverse(net, dev) {
			if (dev->rtnl_link_ops)
				dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
			else
				unregister_netdevice_queue(dev, &dev_kill_list);
		}
	}
	unregister_netdevice_many(&dev_kill_list);
	rtnl_unlock();
}
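
/*
 * The dev_kill_list batching above is the same pattern available to
 * drivers that tear down many interfaces at once (sketch; dev1/dev2
 * are hypothetical, rtnl lock held):
 *
 *	LIST_HEAD(list);
 *	unregister_netdevice_queue(dev1, &list);
 *	unregister_netdevice_queue(dev2, &list);
 *	unregister_netdevice_many(&list);
 */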

static struct pernet_operations __net_initdata default_device_ops = {
	.exit = default_device_exit,
	.exit_batch = default_device_exit_batch,
};

/*
 * Initialize the DEV module. At boot time this walks the device list and
 * unhooks any devices that fail to initialise (normally hardware not
 * present) and leaves us with a valid list of present and active devices.
 */

/*
 * This is called single threaded during boot, so no need
 * to take the rtnl semaphore.
 */
static int __init net_dev_init(void)
{
	int i, rc = -ENOMEM;

	BUG_ON(!dev_boot_phase);

	if (dev_proc_init())
		goto out;

	if (netdev_kobject_init())
		goto out;

	INIT_LIST_HEAD(&ptype_all);
	for (i = 0; i < PTYPE_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ptype_base[i]);

	INIT_LIST_HEAD(&offload_base);

	if (register_pernet_subsys(&netdev_net_ops))
		goto out;

	/*
	 * Initialise the packet receive queues.
	 */
	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		skb_queue_head_init(&sd->input_pkt_queue);
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;
	}

	dev_boot_phase = 0;

	/* The loopback device is special: if any other network device
	 * is present in a network namespace, the loopback device must
	 * be present too. Since we now dynamically allocate and free
	 * the loopback device, ensure this invariant is maintained by
	 * keeping the loopback device first on the list of network
	 * devices, so that it is the first device that appears and the
	 * last network device that disappears.
	 */
	if (register_pernet_device(&loopback_net_ops))
		goto out;

	if (register_pernet_device(&default_device_ops))
		goto out;

	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
	open_softirq(NET_RX_SOFTIRQ, net_rx_action);

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_init();
	rc = 0;
out:
	return rc;
}
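
/*
 * After net_dev_init() runs, receive processing works by queueing
 * packets on a per-cpu softnet_data and raising NET_RX_SOFTIRQ, whose
 * handler (net_rx_action, registered above) polls sd->backlog via
 * process_backlog().  Conceptually, the enqueue side looks like the
 * sketch below; see enqueue_to_backlog() for the real locking and
 * flow-control logic:
 *
 *	__skb_queue_tail(&sd->input_pkt_queue, skb);
 *	____napi_schedule(sd, &sd->backlog);	// raises NET_RX_SOFTIRQ
 */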

subsys_initcall(net_dev_init);