net/core/dev.c
1 /*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
21 *
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
73 */
74
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/stat.h>
101 #include <net/dst.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/module.h>
108 #include <linux/netpoll.h>
109 #include <linux/rcupdate.h>
110 #include <linux/delay.h>
111 #include <net/iw_handler.h>
112 #include <asm/current.h>
113 #include <linux/audit.h>
114 #include <linux/dmaengine.h>
115 #include <linux/err.h>
116 #include <linux/ctype.h>
117 #include <linux/if_arp.h>
118 #include <linux/if_vlan.h>
119 #include <linux/ip.h>
120 #include <net/ip.h>
121 #include <linux/ipv6.h>
122 #include <linux/in.h>
123 #include <linux/jhash.h>
124 #include <linux/random.h>
125 #include <trace/events/napi.h>
126 #include <trace/events/net.h>
127 #include <trace/events/skb.h>
128 #include <linux/pci.h>
129 #include <linux/inetdevice.h>
130 #include <linux/cpu_rmap.h>
131 #include <linux/static_key.h>
132 #include <linux/hashtable.h>
133 #include <linux/vmalloc.h>
134 #include <linux/if_macvlan.h>
135
136 #include "net-sysfs.h"
137
138 /* Instead of increasing this, you should create a hash table. */
139 #define MAX_GRO_SKBS 8
140
141 /* This should be increased if a protocol with a bigger head is added. */
142 #define GRO_MAX_HEAD (MAX_HEADER + 128)
143
144 static DEFINE_SPINLOCK(ptype_lock);
145 static DEFINE_SPINLOCK(offload_lock);
146 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
147 struct list_head ptype_all __read_mostly; /* Taps */
148 static struct list_head offload_base __read_mostly;
149
150 static int netif_rx_internal(struct sk_buff *skb);
151
152 /*
153 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
154 * semaphore.
155 *
156 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
157 *
158 * Writers must hold the rtnl semaphore while they loop through the
159 * dev_base_head list, and hold dev_base_lock for writing when they do the
160 * actual updates. This allows pure readers to access the list even
161 * while a writer is preparing to update it.
162 *
163 * To put it another way, dev_base_lock is held for writing only to
164 * protect against pure readers; the rtnl semaphore provides the
165 * protection against other writers.
166 *
167 * See, for example usages, register_netdevice() and
168 * unregister_netdevice(), which must be called with the rtnl
169 * semaphore held.
170 */
171 DEFINE_RWLOCK(dev_base_lock);
172 EXPORT_SYMBOL(dev_base_lock);
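/*
 * Minimal sketch of the pure-reader rule described above (illustrative
 * only, not part of this file): a reader that does not use RCU may walk
 * the list under dev_base_lock, while writers additionally hold the RTNL.
 *
 *	struct net_device *dev;
 *
 *	read_lock(&dev_base_lock);
 *	for_each_netdev(net, dev)
 *		pr_debug("%s\n", dev->name);	(look, but do not modify)
 *	read_unlock(&dev_base_lock);
 */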
173
174 /* protects napi_hash addition/deletion and napi_gen_id */
175 static DEFINE_SPINLOCK(napi_hash_lock);
176
177 static unsigned int napi_gen_id;
178 static DEFINE_HASHTABLE(napi_hash, 8);
179
180 static seqcount_t devnet_rename_seq;
181
182 static inline void dev_base_seq_inc(struct net *net)
183 {
184 while (++net->dev_base_seq == 0);
185 }
186
187 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
188 {
189 unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
190
191 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
192 }
193
194 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
195 {
196 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
197 }
198
199 static inline void rps_lock(struct softnet_data *sd)
200 {
201 #ifdef CONFIG_RPS
202 spin_lock(&sd->input_pkt_queue.lock);
203 #endif
204 }
205
206 static inline void rps_unlock(struct softnet_data *sd)
207 {
208 #ifdef CONFIG_RPS
209 spin_unlock(&sd->input_pkt_queue.lock);
210 #endif
211 }
212
213 /* Device list insertion */
214 static void list_netdevice(struct net_device *dev)
215 {
216 struct net *net = dev_net(dev);
217
218 ASSERT_RTNL();
219
220 write_lock_bh(&dev_base_lock);
221 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
222 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
223 hlist_add_head_rcu(&dev->index_hlist,
224 dev_index_hash(net, dev->ifindex));
225 write_unlock_bh(&dev_base_lock);
226
227 dev_base_seq_inc(net);
228 }
229
230 /* Device list removal
231  * caller must respect an RCU grace period before freeing/reusing dev
232 */
233 static void unlist_netdevice(struct net_device *dev)
234 {
235 ASSERT_RTNL();
236
237 /* Unlink dev from the device chain */
238 write_lock_bh(&dev_base_lock);
239 list_del_rcu(&dev->dev_list);
240 hlist_del_rcu(&dev->name_hlist);
241 hlist_del_rcu(&dev->index_hlist);
242 write_unlock_bh(&dev_base_lock);
243
244 dev_base_seq_inc(dev_net(dev));
245 }
246
247 /*
248 * Our notifier list
249 */
250
251 static RAW_NOTIFIER_HEAD(netdev_chain);
252
253 /*
254 * Device drivers call our routines to queue packets here. We empty the
255 * queue in the local softnet handler.
256 */
257
258 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
259 EXPORT_PER_CPU_SYMBOL(softnet_data);
260
261 #ifdef CONFIG_LOCKDEP
262 /*
263 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
264 * according to dev->type
265 */
266 static const unsigned short netdev_lock_type[] =
267 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
268 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
269 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
270 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
271 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
272 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
273 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
274 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
275 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
276 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
277 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
278 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
279 ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
280 ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
281 ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
282
283 static const char *const netdev_lock_name[] =
284 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
285 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
286 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
287 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
288 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
289 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
290 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
291 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
292 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
293 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
294 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
295 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
296 "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
297 "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
298 "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
299
300 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
301 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
302
303 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
304 {
305 int i;
306
307 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
308 if (netdev_lock_type[i] == dev_type)
309 return i;
310 /* the last key is used by default */
311 return ARRAY_SIZE(netdev_lock_type) - 1;
312 }
313
314 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
315 unsigned short dev_type)
316 {
317 int i;
318
319 i = netdev_lock_pos(dev_type);
320 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
321 netdev_lock_name[i]);
322 }
323
324 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
325 {
326 int i;
327
328 i = netdev_lock_pos(dev->type);
329 lockdep_set_class_and_name(&dev->addr_list_lock,
330 &netdev_addr_lock_key[i],
331 netdev_lock_name[i]);
332 }
333 #else
334 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
335 unsigned short dev_type)
336 {
337 }
338 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
339 {
340 }
341 #endif
342
343 /*******************************************************************************
344
345 Protocol management and registration routines
346
347 *******************************************************************************/
348
349 /*
350 * Add a protocol ID to the list. Now that the input handler is
351 * smarter we can dispense with all the messy stuff that used to be
352 * here.
353 *
354 * BEWARE!!! Protocol handlers, mangling input packets,
355 * MUST BE last in hash buckets and checking protocol handlers
356 * MUST start from promiscuous ptype_all chain in net_bh.
357 * It is true now, do not change it.
358  *              Explanation follows: if a protocol handler that mangles packets were
359  *              first on the list, it would not be able to sense that the packet
360  *              is cloned and should be copied-on-write, so it would
361  *              change it and subsequent readers would get a broken packet.
362 * --ANK (980803)
363 */
364
365 static inline struct list_head *ptype_head(const struct packet_type *pt)
366 {
367 if (pt->type == htons(ETH_P_ALL))
368 return &ptype_all;
369 else
370 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
371 }
372
373 /**
374 * dev_add_pack - add packet handler
375 * @pt: packet type declaration
376 *
377 * Add a protocol handler to the networking stack. The passed &packet_type
378 * is linked into kernel lists and may not be freed until it has been
379 * removed from the kernel lists.
380 *
381  *      This call does not sleep, therefore it cannot
382  *      guarantee that all CPUs that are in the middle of receiving packets
383 * will see the new packet type (until the next received packet).
384 */
385
386 void dev_add_pack(struct packet_type *pt)
387 {
388 struct list_head *head = ptype_head(pt);
389
390 spin_lock(&ptype_lock);
391 list_add_rcu(&pt->list, head);
392 spin_unlock(&ptype_lock);
393 }
394 EXPORT_SYMBOL(dev_add_pack);
395
396 /**
397 * __dev_remove_pack - remove packet handler
398 * @pt: packet type declaration
399 *
400 * Remove a protocol handler that was previously added to the kernel
401 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
402 * from the kernel lists and can be freed or reused once this function
403 * returns.
404 *
405 * The packet type might still be in use by receivers
406  *      and must not be freed until after all the CPUs have gone
407 * through a quiescent state.
408 */
409 void __dev_remove_pack(struct packet_type *pt)
410 {
411 struct list_head *head = ptype_head(pt);
412 struct packet_type *pt1;
413
414 spin_lock(&ptype_lock);
415
416 list_for_each_entry(pt1, head, list) {
417 if (pt == pt1) {
418 list_del_rcu(&pt->list);
419 goto out;
420 }
421 }
422
423 pr_warn("dev_remove_pack: %p not found\n", pt);
424 out:
425 spin_unlock(&ptype_lock);
426 }
427 EXPORT_SYMBOL(__dev_remove_pack);
428
429 /**
430 * dev_remove_pack - remove packet handler
431 * @pt: packet type declaration
432 *
433 * Remove a protocol handler that was previously added to the kernel
434 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
435 * from the kernel lists and can be freed or reused once this function
436 * returns.
437 *
438 * This call sleeps to guarantee that no CPU is looking at the packet
439 * type after return.
440 */
441 void dev_remove_pack(struct packet_type *pt)
442 {
443 __dev_remove_pack(pt);
444
445 synchronize_net();
446 }
447 EXPORT_SYMBOL(dev_remove_pack);
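/*
 * Illustrative sketch, not part of this file: how a module might pair
 * dev_add_pack()/dev_remove_pack() to tap every received frame. The names
 * my_tap_rcv and my_tap are hypothetical and error handling is minimal.
 *
 *	static int my_tap_rcv(struct sk_buff *skb, struct net_device *dev,
 *			      struct packet_type *pt, struct net_device *orig_dev)
 *	{
 *		kfree_skb(skb);			(consume the clone given to the tap)
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	static struct packet_type my_tap __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_ALL),	(hashed by ptype_head() above)
 *		.func = my_tap_rcv,
 *	};
 *
 *	dev_add_pack(&my_tap);			(e.g. in module init)
 *	dev_remove_pack(&my_tap);		(in module exit; sleeps in synchronize_net())
 */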
448
449
450 /**
451 * dev_add_offload - register offload handlers
452 * @po: protocol offload declaration
453 *
454 * Add protocol offload handlers to the networking stack. The passed
455 * &proto_offload is linked into kernel lists and may not be freed until
456 * it has been removed from the kernel lists.
457 *
458  *      This call does not sleep, therefore it cannot
459  *      guarantee that all CPUs that are in the middle of receiving packets
460 * will see the new offload handlers (until the next received packet).
461 */
462 void dev_add_offload(struct packet_offload *po)
463 {
464 struct list_head *head = &offload_base;
465
466 spin_lock(&offload_lock);
467 list_add_rcu(&po->list, head);
468 spin_unlock(&offload_lock);
469 }
470 EXPORT_SYMBOL(dev_add_offload);
471
472 /**
473 * __dev_remove_offload - remove offload handler
474 * @po: packet offload declaration
475 *
476 * Remove a protocol offload handler that was previously added to the
477 * kernel offload handlers by dev_add_offload(). The passed &offload_type
478 * is removed from the kernel lists and can be freed or reused once this
479 * function returns.
480 *
481 * The packet type might still be in use by receivers
482  *      and must not be freed until after all the CPUs have gone
483 * through a quiescent state.
484 */
485 static void __dev_remove_offload(struct packet_offload *po)
486 {
487 struct list_head *head = &offload_base;
488 struct packet_offload *po1;
489
490 spin_lock(&offload_lock);
491
492 list_for_each_entry(po1, head, list) {
493 if (po == po1) {
494 list_del_rcu(&po->list);
495 goto out;
496 }
497 }
498
499 pr_warn("dev_remove_offload: %p not found\n", po);
500 out:
501 spin_unlock(&offload_lock);
502 }
503
504 /**
505 * dev_remove_offload - remove packet offload handler
506 * @po: packet offload declaration
507 *
508 * Remove a packet offload handler that was previously added to the kernel
509 * offload handlers by dev_add_offload(). The passed &offload_type is
510 * removed from the kernel lists and can be freed or reused once this
511 * function returns.
512 *
513 * This call sleeps to guarantee that no CPU is looking at the packet
514 * type after return.
515 */
516 void dev_remove_offload(struct packet_offload *po)
517 {
518 __dev_remove_offload(po);
519
520 synchronize_net();
521 }
522 EXPORT_SYMBOL(dev_remove_offload);
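/*
 * Rough sketch of how a protocol might register offload callbacks with
 * dev_add_offload(), in the spirit of what af_inet does for ETH_P_IP.
 * The handler names are hypothetical; see struct packet_offload for the
 * exact callback signatures.
 *
 *	static struct packet_offload my_proto_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_proto_offload);	(e.g. at protocol init time)
 */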
523
524 /******************************************************************************
525
526 Device Boot-time Settings Routines
527
528 *******************************************************************************/
529
530 /* Boot time configuration table */
531 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
532
533 /**
534 * netdev_boot_setup_add - add new setup entry
535 * @name: name of the device
536 * @map: configured settings for the device
537 *
538 * Adds new setup entry to the dev_boot_setup list. The function
539 * returns 0 on error and 1 on success. This is a generic routine to
540 * all netdevices.
541 */
542 static int netdev_boot_setup_add(char *name, struct ifmap *map)
543 {
544 struct netdev_boot_setup *s;
545 int i;
546
547 s = dev_boot_setup;
548 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
549 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
550 memset(s[i].name, 0, sizeof(s[i].name));
551 strlcpy(s[i].name, name, IFNAMSIZ);
552 memcpy(&s[i].map, map, sizeof(s[i].map));
553 break;
554 }
555 }
556
557 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
558 }
559
560 /**
561 * netdev_boot_setup_check - check boot time settings
562 * @dev: the netdevice
563 *
564 * Check boot time settings for the device.
565 * The found settings are set for the device to be used
566 * later in the device probing.
567  *      Returns 0 if no settings are found, 1 if they are.
568 */
569 int netdev_boot_setup_check(struct net_device *dev)
570 {
571 struct netdev_boot_setup *s = dev_boot_setup;
572 int i;
573
574 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
575 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
576 !strcmp(dev->name, s[i].name)) {
577 dev->irq = s[i].map.irq;
578 dev->base_addr = s[i].map.base_addr;
579 dev->mem_start = s[i].map.mem_start;
580 dev->mem_end = s[i].map.mem_end;
581 return 1;
582 }
583 }
584 return 0;
585 }
586 EXPORT_SYMBOL(netdev_boot_setup_check);
587
588
589 /**
590 * netdev_boot_base - get address from boot time settings
591 * @prefix: prefix for network device
592 * @unit: id for network device
593 *
594 * Check boot time settings for the base address of device.
595 * The found settings are set for the device to be used
596 * later in the device probing.
597 * Returns 0 if no settings found.
598 */
599 unsigned long netdev_boot_base(const char *prefix, int unit)
600 {
601 const struct netdev_boot_setup *s = dev_boot_setup;
602 char name[IFNAMSIZ];
603 int i;
604
605 sprintf(name, "%s%d", prefix, unit);
606
607 /*
608  * If the device is already registered then return a base of 1
609 * to indicate not to probe for this interface
610 */
611 if (__dev_get_by_name(&init_net, name))
612 return 1;
613
614 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
615 if (!strcmp(name, s[i].name))
616 return s[i].map.base_addr;
617 return 0;
618 }
619
620 /*
621  * Saves the settings configured at boot time for any netdevice.
622 */
623 int __init netdev_boot_setup(char *str)
624 {
625 int ints[5];
626 struct ifmap map;
627
628 str = get_options(str, ARRAY_SIZE(ints), ints);
629 if (!str || !*str)
630 return 0;
631
632 /* Save settings */
633 memset(&map, 0, sizeof(map));
634 if (ints[0] > 0)
635 map.irq = ints[1];
636 if (ints[0] > 1)
637 map.base_addr = ints[2];
638 if (ints[0] > 2)
639 map.mem_start = ints[3];
640 if (ints[0] > 3)
641 map.mem_end = ints[4];
642
643 /* Add new entry to the list */
644 return netdev_boot_setup_add(str, &map);
645 }
646
647 __setup("netdev=", netdev_boot_setup);
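/*
 * Example of the boot parameter handled above (values are illustrative):
 *
 *	netdev=5,0x300,0,0,eth0
 *
 * get_options() fills ints[] so that irq = 5, base_addr = 0x300 and
 * mem_start = mem_end = 0; the remaining string "eth0" becomes the entry
 * name passed to netdev_boot_setup_add().
 */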
648
649 /*******************************************************************************
650
651 Device Interface Subroutines
652
653 *******************************************************************************/
654
655 /**
656 * __dev_get_by_name - find a device by its name
657 * @net: the applicable net namespace
658 * @name: name to find
659 *
660 * Find an interface by name. Must be called under RTNL semaphore
661 * or @dev_base_lock. If the name is found a pointer to the device
662 * is returned. If the name is not found then %NULL is returned. The
663 * reference counters are not incremented so the caller must be
664 * careful with locks.
665 */
666
667 struct net_device *__dev_get_by_name(struct net *net, const char *name)
668 {
669 struct net_device *dev;
670 struct hlist_head *head = dev_name_hash(net, name);
671
672 hlist_for_each_entry(dev, head, name_hlist)
673 if (!strncmp(dev->name, name, IFNAMSIZ))
674 return dev;
675
676 return NULL;
677 }
678 EXPORT_SYMBOL(__dev_get_by_name);
679
680 /**
681 * dev_get_by_name_rcu - find a device by its name
682 * @net: the applicable net namespace
683 * @name: name to find
684 *
685 * Find an interface by name.
686 * If the name is found a pointer to the device is returned.
687 * If the name is not found then %NULL is returned.
688 * The reference counters are not incremented so the caller must be
689 * careful with locks. The caller must hold RCU lock.
690 */
691
692 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
693 {
694 struct net_device *dev;
695 struct hlist_head *head = dev_name_hash(net, name);
696
697 hlist_for_each_entry_rcu(dev, head, name_hlist)
698 if (!strncmp(dev->name, name, IFNAMSIZ))
699 return dev;
700
701 return NULL;
702 }
703 EXPORT_SYMBOL(dev_get_by_name_rcu);
704
705 /**
706 * dev_get_by_name - find a device by its name
707 * @net: the applicable net namespace
708 * @name: name to find
709 *
710 * Find an interface by name. This can be called from any
711 * context and does its own locking. The returned handle has
712 * the usage count incremented and the caller must use dev_put() to
713 * release it when it is no longer needed. %NULL is returned if no
714 * matching device is found.
715 */
716
717 struct net_device *dev_get_by_name(struct net *net, const char *name)
718 {
719 struct net_device *dev;
720
721 rcu_read_lock();
722 dev = dev_get_by_name_rcu(net, name);
723 if (dev)
724 dev_hold(dev);
725 rcu_read_unlock();
726 return dev;
727 }
728 EXPORT_SYMBOL(dev_get_by_name);
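/*
 * Usage sketch (not from this file): the refcounted lookup above versus
 * the RCU variant. The name "eth0" is illustrative only.
 *
 *	struct net_device *dev = dev_get_by_name(net, "eth0");
 *	if (dev) {
 *		...			(dev cannot go away here)
 *		dev_put(dev);
 *	}
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_name_rcu(net, "eth0");
 *	if (dev)
 *		...			(valid only inside this RCU section)
 *	rcu_read_unlock();
 */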
729
730 /**
731 * __dev_get_by_index - find a device by its ifindex
732 * @net: the applicable net namespace
733 * @ifindex: index of device
734 *
735 * Search for an interface by index. Returns %NULL if the device
736 * is not found or a pointer to the device. The device has not
737 * had its reference counter increased so the caller must be careful
738 * about locking. The caller must hold either the RTNL semaphore
739 * or @dev_base_lock.
740 */
741
742 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
743 {
744 struct net_device *dev;
745 struct hlist_head *head = dev_index_hash(net, ifindex);
746
747 hlist_for_each_entry(dev, head, index_hlist)
748 if (dev->ifindex == ifindex)
749 return dev;
750
751 return NULL;
752 }
753 EXPORT_SYMBOL(__dev_get_by_index);
754
755 /**
756 * dev_get_by_index_rcu - find a device by its ifindex
757 * @net: the applicable net namespace
758 * @ifindex: index of device
759 *
760 * Search for an interface by index. Returns %NULL if the device
761 * is not found or a pointer to the device. The device has not
762 * had its reference counter increased so the caller must be careful
763 * about locking. The caller must hold RCU lock.
764 */
765
766 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
767 {
768 struct net_device *dev;
769 struct hlist_head *head = dev_index_hash(net, ifindex);
770
771 hlist_for_each_entry_rcu(dev, head, index_hlist)
772 if (dev->ifindex == ifindex)
773 return dev;
774
775 return NULL;
776 }
777 EXPORT_SYMBOL(dev_get_by_index_rcu);
778
779
780 /**
781 * dev_get_by_index - find a device by its ifindex
782 * @net: the applicable net namespace
783 * @ifindex: index of device
784 *
785 * Search for an interface by index. Returns NULL if the device
786 * is not found or a pointer to the device. The device returned has
787 * had a reference added and the pointer is safe until the user calls
788 * dev_put to indicate they have finished with it.
789 */
790
791 struct net_device *dev_get_by_index(struct net *net, int ifindex)
792 {
793 struct net_device *dev;
794
795 rcu_read_lock();
796 dev = dev_get_by_index_rcu(net, ifindex);
797 if (dev)
798 dev_hold(dev);
799 rcu_read_unlock();
800 return dev;
801 }
802 EXPORT_SYMBOL(dev_get_by_index);
803
804 /**
805 * netdev_get_name - get a netdevice name, knowing its ifindex.
806 * @net: network namespace
807 * @name: a pointer to the buffer where the name will be stored.
808 * @ifindex: the ifindex of the interface to get the name from.
809 *
810 * The use of raw_seqcount_begin() and cond_resched() before
811 * retrying is required as we want to give the writers a chance
812 * to complete when CONFIG_PREEMPT is not set.
813 */
814 int netdev_get_name(struct net *net, char *name, int ifindex)
815 {
816 struct net_device *dev;
817 unsigned int seq;
818
819 retry:
820 seq = raw_seqcount_begin(&devnet_rename_seq);
821 rcu_read_lock();
822 dev = dev_get_by_index_rcu(net, ifindex);
823 if (!dev) {
824 rcu_read_unlock();
825 return -ENODEV;
826 }
827
828 strcpy(name, dev->name);
829 rcu_read_unlock();
830 if (read_seqcount_retry(&devnet_rename_seq, seq)) {
831 cond_resched();
832 goto retry;
833 }
834
835 return 0;
836 }
837
838 /**
839 * dev_getbyhwaddr_rcu - find a device by its hardware address
840 * @net: the applicable net namespace
841 * @type: media type of device
842 * @ha: hardware address
843 *
844 * Search for an interface by MAC address. Returns NULL if the device
845 * is not found or a pointer to the device.
846 * The caller must hold RCU or RTNL.
847 * The returned device has not had its ref count increased
848 * and the caller must therefore be careful about locking
849 *
850 */
851
852 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
853 const char *ha)
854 {
855 struct net_device *dev;
856
857 for_each_netdev_rcu(net, dev)
858 if (dev->type == type &&
859 !memcmp(dev->dev_addr, ha, dev->addr_len))
860 return dev;
861
862 return NULL;
863 }
864 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
865
866 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
867 {
868 struct net_device *dev;
869
870 ASSERT_RTNL();
871 for_each_netdev(net, dev)
872 if (dev->type == type)
873 return dev;
874
875 return NULL;
876 }
877 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
878
879 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
880 {
881 struct net_device *dev, *ret = NULL;
882
883 rcu_read_lock();
884 for_each_netdev_rcu(net, dev)
885 if (dev->type == type) {
886 dev_hold(dev);
887 ret = dev;
888 break;
889 }
890 rcu_read_unlock();
891 return ret;
892 }
893 EXPORT_SYMBOL(dev_getfirstbyhwtype);
894
895 /**
896 * dev_get_by_flags_rcu - find any device with given flags
897 * @net: the applicable net namespace
898 * @if_flags: IFF_* values
899 * @mask: bitmask of bits in if_flags to check
900 *
901 * Search for any interface with the given flags. Returns NULL if a device
902 * is not found or a pointer to the device. Must be called inside
903 * rcu_read_lock(), and result refcount is unchanged.
904 */
905
906 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
907 unsigned short mask)
908 {
909 struct net_device *dev, *ret;
910
911 ret = NULL;
912 for_each_netdev_rcu(net, dev) {
913 if (((dev->flags ^ if_flags) & mask) == 0) {
914 ret = dev;
915 break;
916 }
917 }
918 return ret;
919 }
920 EXPORT_SYMBOL(dev_get_by_flags_rcu);
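/*
 * Illustrative call, honouring the rcu_read_lock() requirement noted
 * above: find any interface that is both up and running.
 *
 *	rcu_read_lock();
 *	dev = dev_get_by_flags_rcu(net, IFF_UP | IFF_RUNNING,
 *				   IFF_UP | IFF_RUNNING);
 *	if (dev)
 *		...			(use dev within this RCU section only)
 *	rcu_read_unlock();
 */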
921
922 /**
923 * dev_valid_name - check if name is okay for network device
924 * @name: name string
925 *
926  *      Network device names need to be valid file names
927 * to allow sysfs to work. We also disallow any kind of
928 * whitespace.
929 */
930 bool dev_valid_name(const char *name)
931 {
932 if (*name == '\0')
933 return false;
934 if (strlen(name) >= IFNAMSIZ)
935 return false;
936 if (!strcmp(name, ".") || !strcmp(name, ".."))
937 return false;
938
939 while (*name) {
940 if (*name == '/' || isspace(*name))
941 return false;
942 name++;
943 }
944 return true;
945 }
946 EXPORT_SYMBOL(dev_valid_name);
947
948 /**
949 * __dev_alloc_name - allocate a name for a device
950 * @net: network namespace to allocate the device name in
951 * @name: name format string
952 * @buf: scratch buffer and result name string
953 *
954  *      Passed a format string - e.g. "lt%d" - it will try to find a suitable
955  *      id. It scans the list of devices to build up a free map, then chooses
956 * the first empty slot. The caller must hold the dev_base or rtnl lock
957 * while allocating the name and adding the device in order to avoid
958 * duplicates.
959 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
960 * Returns the number of the unit assigned or a negative errno code.
961 */
962
963 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
964 {
965 int i = 0;
966 const char *p;
967 const int max_netdevices = 8*PAGE_SIZE;
968 unsigned long *inuse;
969 struct net_device *d;
970
971 p = strnchr(name, IFNAMSIZ-1, '%');
972 if (p) {
973 /*
974 * Verify the string as this thing may have come from
975 * the user. There must be either one "%d" and no other "%"
976 * characters.
977 */
978 if (p[1] != 'd' || strchr(p + 2, '%'))
979 return -EINVAL;
980
981 /* Use one page as a bit array of possible slots */
982 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
983 if (!inuse)
984 return -ENOMEM;
985
986 for_each_netdev(net, d) {
987 if (!sscanf(d->name, name, &i))
988 continue;
989 if (i < 0 || i >= max_netdevices)
990 continue;
991
992 /* avoid cases where sscanf is not exact inverse of printf */
993 snprintf(buf, IFNAMSIZ, name, i);
994 if (!strncmp(buf, d->name, IFNAMSIZ))
995 set_bit(i, inuse);
996 }
997
998 i = find_first_zero_bit(inuse, max_netdevices);
999 free_page((unsigned long) inuse);
1000 }
1001
1002 if (buf != name)
1003 snprintf(buf, IFNAMSIZ, name, i);
1004 if (!__dev_get_by_name(net, buf))
1005 return i;
1006
1007 /* It is possible to run out of possible slots
1008 * when the name is long and there isn't enough space left
1009 * for the digits, or if all bits are used.
1010 */
1011 return -ENFILE;
1012 }
1013
1014 /**
1015 * dev_alloc_name - allocate a name for a device
1016 * @dev: device
1017 * @name: name format string
1018 *
1019  *      Passed a format string - e.g. "lt%d" - it will try to find a suitable
1020  *      id. It scans the list of devices to build up a free map, then chooses
1021 * the first empty slot. The caller must hold the dev_base or rtnl lock
1022 * while allocating the name and adding the device in order to avoid
1023 * duplicates.
1024 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1025 * Returns the number of the unit assigned or a negative errno code.
1026 */
1027
1028 int dev_alloc_name(struct net_device *dev, const char *name)
1029 {
1030 char buf[IFNAMSIZ];
1031 struct net *net;
1032 int ret;
1033
1034 BUG_ON(!dev_net(dev));
1035 net = dev_net(dev);
1036 ret = __dev_alloc_name(net, name, buf);
1037 if (ret >= 0)
1038 strlcpy(dev->name, buf, IFNAMSIZ);
1039 return ret;
1040 }
1041 EXPORT_SYMBOL(dev_alloc_name);
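/*
 * Usage sketch, assuming a driver that registers its devices by hand
 * (the "dummy%d" format is hypothetical): pass a format string and let
 * the core pick the first free unit number.
 *
 *	err = dev_alloc_name(dev, "dummy%d");
 *	if (err < 0)
 *		goto fail;
 *	err = register_netdevice(dev);	(dev->name is now e.g. "dummy0")
 */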
1042
1043 static int dev_alloc_name_ns(struct net *net,
1044 struct net_device *dev,
1045 const char *name)
1046 {
1047 char buf[IFNAMSIZ];
1048 int ret;
1049
1050 ret = __dev_alloc_name(net, name, buf);
1051 if (ret >= 0)
1052 strlcpy(dev->name, buf, IFNAMSIZ);
1053 return ret;
1054 }
1055
1056 static int dev_get_valid_name(struct net *net,
1057 struct net_device *dev,
1058 const char *name)
1059 {
1060 BUG_ON(!net);
1061
1062 if (!dev_valid_name(name))
1063 return -EINVAL;
1064
1065 if (strchr(name, '%'))
1066 return dev_alloc_name_ns(net, dev, name);
1067 else if (__dev_get_by_name(net, name))
1068 return -EEXIST;
1069 else if (dev->name != name)
1070 strlcpy(dev->name, name, IFNAMSIZ);
1071
1072 return 0;
1073 }
1074
1075 /**
1076 * dev_change_name - change name of a device
1077 * @dev: device
1078 * @newname: name (or format string) must be at least IFNAMSIZ
1079 *
1080  *      Change the name of a device. A format string such as "eth%d" can be
1081  *      passed for wildcarding.
1082 */
1083 int dev_change_name(struct net_device *dev, const char *newname)
1084 {
1085 char oldname[IFNAMSIZ];
1086 int err = 0;
1087 int ret;
1088 struct net *net;
1089
1090 ASSERT_RTNL();
1091 BUG_ON(!dev_net(dev));
1092
1093 net = dev_net(dev);
1094 if (dev->flags & IFF_UP)
1095 return -EBUSY;
1096
1097 write_seqcount_begin(&devnet_rename_seq);
1098
1099 if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1100 write_seqcount_end(&devnet_rename_seq);
1101 return 0;
1102 }
1103
1104 memcpy(oldname, dev->name, IFNAMSIZ);
1105
1106 err = dev_get_valid_name(net, dev, newname);
1107 if (err < 0) {
1108 write_seqcount_end(&devnet_rename_seq);
1109 return err;
1110 }
1111
1112 rollback:
1113 ret = device_rename(&dev->dev, dev->name);
1114 if (ret) {
1115 memcpy(dev->name, oldname, IFNAMSIZ);
1116 write_seqcount_end(&devnet_rename_seq);
1117 return ret;
1118 }
1119
1120 write_seqcount_end(&devnet_rename_seq);
1121
1122 netdev_adjacent_rename_links(dev, oldname);
1123
1124 write_lock_bh(&dev_base_lock);
1125 hlist_del_rcu(&dev->name_hlist);
1126 write_unlock_bh(&dev_base_lock);
1127
1128 synchronize_rcu();
1129
1130 write_lock_bh(&dev_base_lock);
1131 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1132 write_unlock_bh(&dev_base_lock);
1133
1134 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1135 ret = notifier_to_errno(ret);
1136
1137 if (ret) {
1138 /* err >= 0 after dev_alloc_name() or stores the first errno */
1139 if (err >= 0) {
1140 err = ret;
1141 write_seqcount_begin(&devnet_rename_seq);
1142 memcpy(dev->name, oldname, IFNAMSIZ);
1143 memcpy(oldname, newname, IFNAMSIZ);
1144 goto rollback;
1145 } else {
1146 pr_err("%s: name change rollback failed: %d\n",
1147 dev->name, ret);
1148 }
1149 }
1150
1151 return err;
1152 }
1153
1154 /**
1155 * dev_set_alias - change ifalias of a device
1156 * @dev: device
1157 * @alias: name up to IFALIASZ
1158 * @len: limit of bytes to copy from info
1159 *
1160  *      Set the ifalias for a device.
1161 */
1162 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1163 {
1164 char *new_ifalias;
1165
1166 ASSERT_RTNL();
1167
1168 if (len >= IFALIASZ)
1169 return -EINVAL;
1170
1171 if (!len) {
1172 kfree(dev->ifalias);
1173 dev->ifalias = NULL;
1174 return 0;
1175 }
1176
1177 new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1178 if (!new_ifalias)
1179 return -ENOMEM;
1180 dev->ifalias = new_ifalias;
1181
1182 strlcpy(dev->ifalias, alias, len+1);
1183 return len;
1184 }
1185
1186
1187 /**
1188 * netdev_features_change - device changes features
1189 * @dev: device to cause notification
1190 *
1191 * Called to indicate a device has changed features.
1192 */
1193 void netdev_features_change(struct net_device *dev)
1194 {
1195 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1196 }
1197 EXPORT_SYMBOL(netdev_features_change);
1198
1199 /**
1200 * netdev_state_change - device changes state
1201 * @dev: device to cause notification
1202 *
1203 * Called to indicate a device has changed state. This function calls
1204 * the notifier chains for netdev_chain and sends a NEWLINK message
1205 * to the routing socket.
1206 */
1207 void netdev_state_change(struct net_device *dev)
1208 {
1209 if (dev->flags & IFF_UP) {
1210 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1211 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1212 }
1213 }
1214 EXPORT_SYMBOL(netdev_state_change);
1215
1216 /**
1217 * netdev_notify_peers - notify network peers about existence of @dev
1218 * @dev: network device
1219 *
1220 * Generate traffic such that interested network peers are aware of
1221 * @dev, such as by generating a gratuitous ARP. This may be used when
1222 * a device wants to inform the rest of the network about some sort of
1223 * reconfiguration such as a failover event or virtual machine
1224 * migration.
1225 */
1226 void netdev_notify_peers(struct net_device *dev)
1227 {
1228 rtnl_lock();
1229 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1230 rtnl_unlock();
1231 }
1232 EXPORT_SYMBOL(netdev_notify_peers);
1233
1234 static int __dev_open(struct net_device *dev)
1235 {
1236 const struct net_device_ops *ops = dev->netdev_ops;
1237 int ret;
1238
1239 ASSERT_RTNL();
1240
1241 if (!netif_device_present(dev))
1242 return -ENODEV;
1243
1244 /* Block netpoll from trying to do any rx path servicing.
1245 * If we don't do this there is a chance ndo_poll_controller
1246 * or ndo_poll may be running while we open the device
1247 */
1248 netpoll_rx_disable(dev);
1249
1250 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1251 ret = notifier_to_errno(ret);
1252 if (ret)
1253 return ret;
1254
1255 set_bit(__LINK_STATE_START, &dev->state);
1256
1257 if (ops->ndo_validate_addr)
1258 ret = ops->ndo_validate_addr(dev);
1259
1260 if (!ret && ops->ndo_open)
1261 ret = ops->ndo_open(dev);
1262
1263 netpoll_rx_enable(dev);
1264
1265 if (ret)
1266 clear_bit(__LINK_STATE_START, &dev->state);
1267 else {
1268 dev->flags |= IFF_UP;
1269 net_dmaengine_get();
1270 dev_set_rx_mode(dev);
1271 dev_activate(dev);
1272 add_device_randomness(dev->dev_addr, dev->addr_len);
1273 }
1274
1275 return ret;
1276 }
1277
1278 /**
1279 * dev_open - prepare an interface for use.
1280 * @dev: device to open
1281 *
1282 * Takes a device from down to up state. The device's private open
1283 * function is invoked and then the multicast lists are loaded. Finally
1284 * the device is moved into the up state and a %NETDEV_UP message is
1285 * sent to the netdev notifier chain.
1286 *
1287 * Calling this function on an active interface is a nop. On a failure
1288 * a negative errno code is returned.
1289 */
1290 int dev_open(struct net_device *dev)
1291 {
1292 int ret;
1293
1294 if (dev->flags & IFF_UP)
1295 return 0;
1296
1297 ret = __dev_open(dev);
1298 if (ret < 0)
1299 return ret;
1300
1301 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1302 call_netdevice_notifiers(NETDEV_UP, dev);
1303
1304 return ret;
1305 }
1306 EXPORT_SYMBOL(dev_open);
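/*
 * Sketch of bringing an interface up from kernel code (illustrative; most
 * callers go through the rtnetlink/ioctl paths instead). dev_open() must
 * run under the RTNL, as __dev_open() asserts.
 *
 *	rtnl_lock();
 *	err = dev_open(dev);
 *	rtnl_unlock();
 */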
1307
1308 static int __dev_close_many(struct list_head *head)
1309 {
1310 struct net_device *dev;
1311
1312 ASSERT_RTNL();
1313 might_sleep();
1314
1315 list_for_each_entry(dev, head, close_list) {
1316 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1317
1318 clear_bit(__LINK_STATE_START, &dev->state);
1319
1320 /* Synchronize to scheduled poll. We cannot touch poll list, it
1321 * can be even on different cpu. So just clear netif_running().
1322 *
1323  *              dev->stop() will invoke napi_disable() on all of its
1324 * napi_struct instances on this device.
1325 */
1326 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1327 }
1328
1329 dev_deactivate_many(head);
1330
1331 list_for_each_entry(dev, head, close_list) {
1332 const struct net_device_ops *ops = dev->netdev_ops;
1333
1334 /*
1335 * Call the device specific close. This cannot fail.
1336 * Only if device is UP
1337 *
1338 * We allow it to be called even after a DETACH hot-plug
1339 * event.
1340 */
1341 if (ops->ndo_stop)
1342 ops->ndo_stop(dev);
1343
1344 dev->flags &= ~IFF_UP;
1345 net_dmaengine_put();
1346 }
1347
1348 return 0;
1349 }
1350
1351 static int __dev_close(struct net_device *dev)
1352 {
1353 int retval;
1354 LIST_HEAD(single);
1355
1356 /* Temporarily disable netpoll until the interface is down */
1357 netpoll_rx_disable(dev);
1358
1359 list_add(&dev->close_list, &single);
1360 retval = __dev_close_many(&single);
1361 list_del(&single);
1362
1363 netpoll_rx_enable(dev);
1364 return retval;
1365 }
1366
1367 static int dev_close_many(struct list_head *head)
1368 {
1369 struct net_device *dev, *tmp;
1370
1371 /* Remove the devices that don't need to be closed */
1372 list_for_each_entry_safe(dev, tmp, head, close_list)
1373 if (!(dev->flags & IFF_UP))
1374 list_del_init(&dev->close_list);
1375
1376 __dev_close_many(head);
1377
1378 list_for_each_entry_safe(dev, tmp, head, close_list) {
1379 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1380 call_netdevice_notifiers(NETDEV_DOWN, dev);
1381 list_del_init(&dev->close_list);
1382 }
1383
1384 return 0;
1385 }
1386
1387 /**
1388 * dev_close - shutdown an interface.
1389 * @dev: device to shutdown
1390 *
1391 * This function moves an active device into down state. A
1392 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1393 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1394 * chain.
1395 */
1396 int dev_close(struct net_device *dev)
1397 {
1398 if (dev->flags & IFF_UP) {
1399 LIST_HEAD(single);
1400
1401 /* Block netpoll rx while the interface is going down */
1402 netpoll_rx_disable(dev);
1403
1404 list_add(&dev->close_list, &single);
1405 dev_close_many(&single);
1406 list_del(&single);
1407
1408 netpoll_rx_enable(dev);
1409 }
1410 return 0;
1411 }
1412 EXPORT_SYMBOL(dev_close);
1413
1414
1415 /**
1416 * dev_disable_lro - disable Large Receive Offload on a device
1417 * @dev: device
1418 *
1419 * Disable Large Receive Offload (LRO) on a net device. Must be
1420 * called under RTNL. This is needed if received packets may be
1421 * forwarded to another interface.
1422 */
1423 void dev_disable_lro(struct net_device *dev)
1424 {
1425 /*
1426 * If we're trying to disable lro on a vlan device
1427 * use the underlying physical device instead
1428 */
1429 if (is_vlan_dev(dev))
1430 dev = vlan_dev_real_dev(dev);
1431
1432 /* the same for macvlan devices */
1433 if (netif_is_macvlan(dev))
1434 dev = macvlan_dev_real_dev(dev);
1435
1436 dev->wanted_features &= ~NETIF_F_LRO;
1437 netdev_update_features(dev);
1438
1439 if (unlikely(dev->features & NETIF_F_LRO))
1440 netdev_WARN(dev, "failed to disable LRO!\n");
1441 }
1442 EXPORT_SYMBOL(dev_disable_lro);
1443
1444 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1445 struct net_device *dev)
1446 {
1447 struct netdev_notifier_info info;
1448
1449 netdev_notifier_info_init(&info, dev);
1450 return nb->notifier_call(nb, val, &info);
1451 }
1452
1453 static int dev_boot_phase = 1;
1454
1455 /**
1456 * register_netdevice_notifier - register a network notifier block
1457 * @nb: notifier
1458 *
1459 * Register a notifier to be called when network device events occur.
1460 * The notifier passed is linked into the kernel structures and must
1461 * not be reused until it has been unregistered. A negative errno code
1462 * is returned on a failure.
1463 *
1464  *      When registered, all registration and up events are replayed
1465  *      to the new notifier to allow it to have a race-free
1466 * view of the network device list.
1467 */
1468
1469 int register_netdevice_notifier(struct notifier_block *nb)
1470 {
1471 struct net_device *dev;
1472 struct net_device *last;
1473 struct net *net;
1474 int err;
1475
1476 rtnl_lock();
1477 err = raw_notifier_chain_register(&netdev_chain, nb);
1478 if (err)
1479 goto unlock;
1480 if (dev_boot_phase)
1481 goto unlock;
1482 for_each_net(net) {
1483 for_each_netdev(net, dev) {
1484 err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1485 err = notifier_to_errno(err);
1486 if (err)
1487 goto rollback;
1488
1489 if (!(dev->flags & IFF_UP))
1490 continue;
1491
1492 call_netdevice_notifier(nb, NETDEV_UP, dev);
1493 }
1494 }
1495
1496 unlock:
1497 rtnl_unlock();
1498 return err;
1499
1500 rollback:
1501 last = dev;
1502 for_each_net(net) {
1503 for_each_netdev(net, dev) {
1504 if (dev == last)
1505 goto outroll;
1506
1507 if (dev->flags & IFF_UP) {
1508 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1509 dev);
1510 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1511 }
1512 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1513 }
1514 }
1515
1516 outroll:
1517 raw_notifier_chain_unregister(&netdev_chain, nb);
1518 goto unlock;
1519 }
1520 EXPORT_SYMBOL(register_netdevice_notifier);
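/*
 * Minimal notifier sketch with hypothetical names: the callback receives
 * the event in @val, extracts the device from the notifier info, and the
 * block is then registered so that existing devices are replayed to it as
 * described above.
 *
 *	static int my_netdev_event(struct notifier_block *nb,
 *				   unsigned long val, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (val) {
 *		case NETDEV_UP:
 *		case NETDEV_DOWN:
 *			pr_debug("%s changed state\n", dev->name);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_netdev_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_netdev_nb);
 */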
1521
1522 /**
1523 * unregister_netdevice_notifier - unregister a network notifier block
1524 * @nb: notifier
1525 *
1526 * Unregister a notifier previously registered by
1527  *      register_netdevice_notifier(). The notifier is unlinked from the
1528 * kernel structures and may then be reused. A negative errno code
1529 * is returned on a failure.
1530 *
1531  *      After unregistering, unregister and down device events are synthesized
1532 * for all devices on the device list to the removed notifier to remove
1533 * the need for special case cleanup code.
1534 */
1535
1536 int unregister_netdevice_notifier(struct notifier_block *nb)
1537 {
1538 struct net_device *dev;
1539 struct net *net;
1540 int err;
1541
1542 rtnl_lock();
1543 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1544 if (err)
1545 goto unlock;
1546
1547 for_each_net(net) {
1548 for_each_netdev(net, dev) {
1549 if (dev->flags & IFF_UP) {
1550 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1551 dev);
1552 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1553 }
1554 call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1555 }
1556 }
1557 unlock:
1558 rtnl_unlock();
1559 return err;
1560 }
1561 EXPORT_SYMBOL(unregister_netdevice_notifier);
1562
1563 /**
1564 * call_netdevice_notifiers_info - call all network notifier blocks
1565 * @val: value passed unmodified to notifier function
1566 * @dev: net_device pointer passed unmodified to notifier function
1567 * @info: notifier information data
1568 *
1569 * Call all network notifier blocks. Parameters and return value
1570 * are as for raw_notifier_call_chain().
1571 */
1572
1573 static int call_netdevice_notifiers_info(unsigned long val,
1574 struct net_device *dev,
1575 struct netdev_notifier_info *info)
1576 {
1577 ASSERT_RTNL();
1578 netdev_notifier_info_init(info, dev);
1579 return raw_notifier_call_chain(&netdev_chain, val, info);
1580 }
1581
1582 /**
1583 * call_netdevice_notifiers - call all network notifier blocks
1584 * @val: value passed unmodified to notifier function
1585 * @dev: net_device pointer passed unmodified to notifier function
1586 *
1587 * Call all network notifier blocks. Parameters and return value
1588 * are as for raw_notifier_call_chain().
1589 */
1590
1591 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1592 {
1593 struct netdev_notifier_info info;
1594
1595 return call_netdevice_notifiers_info(val, dev, &info);
1596 }
1597 EXPORT_SYMBOL(call_netdevice_notifiers);
1598
1599 static struct static_key netstamp_needed __read_mostly;
1600 #ifdef HAVE_JUMP_LABEL
1601 /* We are not allowed to call static_key_slow_dec() from irq context
1602 * If net_disable_timestamp() is called from irq context, defer the
1603 * static_key_slow_dec() calls.
1604 */
1605 static atomic_t netstamp_needed_deferred;
1606 #endif
1607
1608 void net_enable_timestamp(void)
1609 {
1610 #ifdef HAVE_JUMP_LABEL
1611 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1612
1613 if (deferred) {
1614 while (--deferred)
1615 static_key_slow_dec(&netstamp_needed);
1616 return;
1617 }
1618 #endif
1619 static_key_slow_inc(&netstamp_needed);
1620 }
1621 EXPORT_SYMBOL(net_enable_timestamp);
1622
1623 void net_disable_timestamp(void)
1624 {
1625 #ifdef HAVE_JUMP_LABEL
1626 if (in_interrupt()) {
1627 atomic_inc(&netstamp_needed_deferred);
1628 return;
1629 }
1630 #endif
1631 static_key_slow_dec(&netstamp_needed);
1632 }
1633 EXPORT_SYMBOL(net_disable_timestamp);
1634
1635 static inline void net_timestamp_set(struct sk_buff *skb)
1636 {
1637 skb->tstamp.tv64 = 0;
1638 if (static_key_false(&netstamp_needed))
1639 __net_timestamp(skb);
1640 }
1641
1642 #define net_timestamp_check(COND, SKB) \
1643 if (static_key_false(&netstamp_needed)) { \
1644 if ((COND) && !(SKB)->tstamp.tv64) \
1645 __net_timestamp(SKB); \
1646 } \
1647
1648 static inline bool is_skb_forwardable(struct net_device *dev,
1649 struct sk_buff *skb)
1650 {
1651 unsigned int len;
1652
1653 if (!(dev->flags & IFF_UP))
1654 return false;
1655
1656 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1657 if (skb->len <= len)
1658 return true;
1659
1660 /* if TSO is enabled, we don't care about the length as the packet
1661 * could be forwarded without being segmented before
1662 */
1663 if (skb_is_gso(skb))
1664 return true;
1665
1666 return false;
1667 }
1668
1669 /**
1670 * dev_forward_skb - loopback an skb to another netif
1671 *
1672 * @dev: destination network device
1673 * @skb: buffer to forward
1674 *
1675 * return values:
1676 * NET_RX_SUCCESS (no congestion)
1677 * NET_RX_DROP (packet was dropped, but freed)
1678 *
1679 * dev_forward_skb can be used for injecting an skb from the
1680 * start_xmit function of one device into the receive queue
1681 * of another device.
1682 *
1683 * The receiving device may be in another namespace, so
1684 * we have to clear all information in the skb that could
1685 * impact namespace isolation.
1686 */
1687 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1688 {
1689 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1690 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1691 atomic_long_inc(&dev->rx_dropped);
1692 kfree_skb(skb);
1693 return NET_RX_DROP;
1694 }
1695 }
1696
1697 if (unlikely(!is_skb_forwardable(dev, skb))) {
1698 atomic_long_inc(&dev->rx_dropped);
1699 kfree_skb(skb);
1700 return NET_RX_DROP;
1701 }
1702
1703 skb_scrub_packet(skb, true);
1704 skb->protocol = eth_type_trans(skb, dev);
1705
1706 return netif_rx_internal(skb);
1707 }
1708 EXPORT_SYMBOL_GPL(dev_forward_skb);
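/*
 * Illustrative use of dev_forward_skb() from a virtual device's transmit
 * path, as the comment above describes: the skb leaves one netdev's
 * start_xmit and is injected into the rx path of its peer. my_pair_xmit
 * and my_get_peer are hypothetical.
 *
 *	static netdev_tx_t my_pair_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = my_get_peer(dev);
 *
 *		dev_forward_skb(peer, skb);	(frees or queues the skb)
 *		return NETDEV_TX_OK;
 *	}
 */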
1709
1710 static inline int deliver_skb(struct sk_buff *skb,
1711 struct packet_type *pt_prev,
1712 struct net_device *orig_dev)
1713 {
1714 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1715 return -ENOMEM;
1716 atomic_inc(&skb->users);
1717 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1718 }
1719
1720 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1721 {
1722 if (!ptype->af_packet_priv || !skb->sk)
1723 return false;
1724
1725 if (ptype->id_match)
1726 return ptype->id_match(ptype, skb->sk);
1727 else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1728 return true;
1729
1730 return false;
1731 }
1732
1733 /*
1734 * Support routine. Sends outgoing frames to any network
1735 * taps currently in use.
1736 */
1737
1738 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1739 {
1740 struct packet_type *ptype;
1741 struct sk_buff *skb2 = NULL;
1742 struct packet_type *pt_prev = NULL;
1743
1744 rcu_read_lock();
1745 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1746 /* Never send packets back to the socket
1747 * they originated from - MvS (miquels@drinkel.ow.org)
1748 */
1749 if ((ptype->dev == dev || !ptype->dev) &&
1750 (!skb_loop_sk(ptype, skb))) {
1751 if (pt_prev) {
1752 deliver_skb(skb2, pt_prev, skb->dev);
1753 pt_prev = ptype;
1754 continue;
1755 }
1756
1757 skb2 = skb_clone(skb, GFP_ATOMIC);
1758 if (!skb2)
1759 break;
1760
1761 net_timestamp_set(skb2);
1762
1763 /* skb->nh should be correctly
1764 set by sender, so that the second statement is
1765 just protection against buggy protocols.
1766 */
1767 skb_reset_mac_header(skb2);
1768
1769 if (skb_network_header(skb2) < skb2->data ||
1770 skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1771 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1772 ntohs(skb2->protocol),
1773 dev->name);
1774 skb_reset_network_header(skb2);
1775 }
1776
1777 skb2->transport_header = skb2->network_header;
1778 skb2->pkt_type = PACKET_OUTGOING;
1779 pt_prev = ptype;
1780 }
1781 }
1782 if (pt_prev)
1783 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1784 rcu_read_unlock();
1785 }
1786
1787 /**
1788 * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1789 * @dev: Network device
1790 * @txq: number of queues available
1791 *
1792 * If real_num_tx_queues is changed the tc mappings may no longer be
1793  * valid. To resolve this, verify the tc mapping remains valid and if
1794  * not, NULL the mapping. With no priorities mapping to this
1795  * offset/count pair it will no longer be used. In the worst case, if TC0
1796  * is invalid, nothing can be done, so priority mappings are disabled. It is
1797  * expected that drivers will fix this mapping if they can before
1798 * calling netif_set_real_num_tx_queues.
1799 */
1800 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1801 {
1802 int i;
1803 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1804
1805 /* If TC0 is invalidated disable TC mapping */
1806 if (tc->offset + tc->count > txq) {
1807 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1808 dev->num_tc = 0;
1809 return;
1810 }
1811
1812 /* Invalidated prio to tc mappings set to TC0 */
1813 for (i = 1; i < TC_BITMASK + 1; i++) {
1814 int q = netdev_get_prio_tc_map(dev, i);
1815
1816 tc = &dev->tc_to_txq[q];
1817 if (tc->offset + tc->count > txq) {
1818 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1819 i, q);
1820 netdev_set_prio_tc_map(dev, i, 0);
1821 }
1822 }
1823 }
1824
1825 #ifdef CONFIG_XPS
1826 static DEFINE_MUTEX(xps_map_mutex);
1827 #define xmap_dereference(P) \
1828 rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1829
1830 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1831 int cpu, u16 index)
1832 {
1833 struct xps_map *map = NULL;
1834 int pos;
1835
1836 if (dev_maps)
1837 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1838
1839 for (pos = 0; map && pos < map->len; pos++) {
1840 if (map->queues[pos] == index) {
1841 if (map->len > 1) {
1842 map->queues[pos] = map->queues[--map->len];
1843 } else {
1844 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1845 kfree_rcu(map, rcu);
1846 map = NULL;
1847 }
1848 break;
1849 }
1850 }
1851
1852 return map;
1853 }
1854
1855 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1856 {
1857 struct xps_dev_maps *dev_maps;
1858 int cpu, i;
1859 bool active = false;
1860
1861 mutex_lock(&xps_map_mutex);
1862 dev_maps = xmap_dereference(dev->xps_maps);
1863
1864 if (!dev_maps)
1865 goto out_no_maps;
1866
1867 for_each_possible_cpu(cpu) {
1868 for (i = index; i < dev->num_tx_queues; i++) {
1869 if (!remove_xps_queue(dev_maps, cpu, i))
1870 break;
1871 }
1872 if (i == dev->num_tx_queues)
1873 active = true;
1874 }
1875
1876 if (!active) {
1877 RCU_INIT_POINTER(dev->xps_maps, NULL);
1878 kfree_rcu(dev_maps, rcu);
1879 }
1880
1881 for (i = index; i < dev->num_tx_queues; i++)
1882 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1883 NUMA_NO_NODE);
1884
1885 out_no_maps:
1886 mutex_unlock(&xps_map_mutex);
1887 }
1888
1889 static struct xps_map *expand_xps_map(struct xps_map *map,
1890 int cpu, u16 index)
1891 {
1892 struct xps_map *new_map;
1893 int alloc_len = XPS_MIN_MAP_ALLOC;
1894 int i, pos;
1895
1896 for (pos = 0; map && pos < map->len; pos++) {
1897 if (map->queues[pos] != index)
1898 continue;
1899 return map;
1900 }
1901
1902 /* Need to add queue to this CPU's existing map */
1903 if (map) {
1904 if (pos < map->alloc_len)
1905 return map;
1906
1907 alloc_len = map->alloc_len * 2;
1908 }
1909
1910 /* Need to allocate new map to store queue on this CPU's map */
1911 new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1912 cpu_to_node(cpu));
1913 if (!new_map)
1914 return NULL;
1915
1916 for (i = 0; i < pos; i++)
1917 new_map->queues[i] = map->queues[i];
1918 new_map->alloc_len = alloc_len;
1919 new_map->len = pos;
1920
1921 return new_map;
1922 }
1923
1924 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
1925 u16 index)
1926 {
1927 struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
1928 struct xps_map *map, *new_map;
1929 int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
1930 int cpu, numa_node_id = -2;
1931 bool active = false;
1932
1933 mutex_lock(&xps_map_mutex);
1934
1935 dev_maps = xmap_dereference(dev->xps_maps);
1936
1937 /* allocate memory for queue storage */
1938 for_each_online_cpu(cpu) {
1939 if (!cpumask_test_cpu(cpu, mask))
1940 continue;
1941
1942 if (!new_dev_maps)
1943 new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
1944 if (!new_dev_maps) {
1945 mutex_unlock(&xps_map_mutex);
1946 return -ENOMEM;
1947 }
1948
1949 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
1950 NULL;
1951
1952 map = expand_xps_map(map, cpu, index);
1953 if (!map)
1954 goto error;
1955
1956 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1957 }
1958
1959 if (!new_dev_maps)
1960 goto out_no_new_maps;
1961
1962 for_each_possible_cpu(cpu) {
1963 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
1964 /* add queue to CPU maps */
1965 int pos = 0;
1966
1967 map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1968 while ((pos < map->len) && (map->queues[pos] != index))
1969 pos++;
1970
1971 if (pos == map->len)
1972 map->queues[map->len++] = index;
1973 #ifdef CONFIG_NUMA
1974 if (numa_node_id == -2)
1975 numa_node_id = cpu_to_node(cpu);
1976 else if (numa_node_id != cpu_to_node(cpu))
1977 numa_node_id = -1;
1978 #endif
1979 } else if (dev_maps) {
1980 /* fill in the new device map from the old device map */
1981 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1982 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
1983 }
1984
1985 }
1986
1987 rcu_assign_pointer(dev->xps_maps, new_dev_maps);
1988
1989 /* Cleanup old maps */
1990 if (dev_maps) {
1991 for_each_possible_cpu(cpu) {
1992 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
1993 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1994 if (map && map != new_map)
1995 kfree_rcu(map, rcu);
1996 }
1997
1998 kfree_rcu(dev_maps, rcu);
1999 }
2000
2001 dev_maps = new_dev_maps;
2002 active = true;
2003
2004 out_no_new_maps:
2005 /* update Tx queue numa node */
2006 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2007 (numa_node_id >= 0) ? numa_node_id :
2008 NUMA_NO_NODE);
2009
2010 if (!dev_maps)
2011 goto out_no_maps;
2012
2013 /* removes queue from unused CPUs */
2014 for_each_possible_cpu(cpu) {
2015 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2016 continue;
2017
2018 if (remove_xps_queue(dev_maps, cpu, index))
2019 active = true;
2020 }
2021
2022 /* free map if not active */
2023 if (!active) {
2024 RCU_INIT_POINTER(dev->xps_maps, NULL);
2025 kfree_rcu(dev_maps, rcu);
2026 }
2027
2028 out_no_maps:
2029 mutex_unlock(&xps_map_mutex);
2030
2031 return 0;
2032 error:
2033 /* remove any maps that we added */
2034 for_each_possible_cpu(cpu) {
2035 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2036 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2037 NULL;
2038 if (new_map && new_map != map)
2039 kfree(new_map);
2040 }
2041
2042 mutex_unlock(&xps_map_mutex);
2043
2044 kfree(new_dev_maps);
2045 return -ENOMEM;
2046 }
2047 EXPORT_SYMBOL(netif_set_xps_queue);
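
/* Usage sketch (illustrative, not part of dev.c): a hypothetical multiqueue
 * driver could spread its TX queues across online CPUs with XPS. The helper
 * name and the simple CPU<->queue policy are assumptions for illustration.
 */
static int example_setup_xps(struct net_device *dev)
{
	u16 queue = 0;
	int cpu, err;

	for_each_online_cpu(cpu) {
		/* steer traffic originating on this CPU to one TX queue */
		err = netif_set_xps_queue(dev, cpumask_of(cpu),
					  queue % dev->real_num_tx_queues);
		if (err)
			return err;
		queue++;
	}
	return 0;
}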
2048
2049 #endif
2050 /*
2051 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2052 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2053 */
2054 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2055 {
2056 int rc;
2057
2058 if (txq < 1 || txq > dev->num_tx_queues)
2059 return -EINVAL;
2060
2061 if (dev->reg_state == NETREG_REGISTERED ||
2062 dev->reg_state == NETREG_UNREGISTERING) {
2063 ASSERT_RTNL();
2064
2065 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2066 txq);
2067 if (rc)
2068 return rc;
2069
2070 if (dev->num_tc)
2071 netif_setup_tc(dev, txq);
2072
2073 if (txq < dev->real_num_tx_queues) {
2074 qdisc_reset_all_tx_gt(dev, txq);
2075 #ifdef CONFIG_XPS
2076 netif_reset_xps_queues_gt(dev, txq);
2077 #endif
2078 }
2079 }
2080
2081 dev->real_num_tx_queues = txq;
2082 return 0;
2083 }
2084 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
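
/* Usage sketch (illustrative only): an ethtool-style channel change in a
 * hypothetical driver. example_set_tx_channels is an assumed name; the point
 * is that the call requires RTNL once the device is registered.
 */
static int example_set_tx_channels(struct net_device *dev, unsigned int count)
{
	int err;

	ASSERT_RTNL();

	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;

	/* a driver resizing RX as well would typically follow up with
	 * netif_set_real_num_rx_queues(dev, count) here
	 */
	return 0;
}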
2085
2086 #ifdef CONFIG_SYSFS
2087 /**
2088 * netif_set_real_num_rx_queues - set actual number of RX queues used
2089 * @dev: Network device
2090 * @rxq: Actual number of RX queues
2091 *
2092 * This must be called either with the rtnl_lock held or before
2093 * registration of the net device. Returns 0 on success, or a
2094 * negative error code. If called before registration, it always
2095 * succeeds.
2096 */
2097 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2098 {
2099 int rc;
2100
2101 if (rxq < 1 || rxq > dev->num_rx_queues)
2102 return -EINVAL;
2103
2104 if (dev->reg_state == NETREG_REGISTERED) {
2105 ASSERT_RTNL();
2106
2107 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2108 rxq);
2109 if (rc)
2110 return rc;
2111 }
2112
2113 dev->real_num_rx_queues = rxq;
2114 return 0;
2115 }
2116 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2117 #endif
2118
2119 /**
2120 * netif_get_num_default_rss_queues - default number of RSS queues
2121 *
2122 * This routine should set an upper limit on the number of RSS queues
2123 * used by default by multiqueue devices.
2124 */
2125 int netif_get_num_default_rss_queues(void)
2126 {
2127 return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2128 }
2129 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
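
/* Usage sketch (assumed driver helper): clamp the number of RSS queues a
 * hypothetical device would like to the stack's suggested default.
 */
static unsigned int example_pick_num_queues(unsigned int hw_max_queues)
{
	/* never ask for more queues than the default RSS upper bound */
	return min_t(unsigned int, hw_max_queues,
		     netif_get_num_default_rss_queues());
}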
2130
2131 static inline void __netif_reschedule(struct Qdisc *q)
2132 {
2133 struct softnet_data *sd;
2134 unsigned long flags;
2135
2136 local_irq_save(flags);
2137 sd = &__get_cpu_var(softnet_data);
2138 q->next_sched = NULL;
2139 *sd->output_queue_tailp = q;
2140 sd->output_queue_tailp = &q->next_sched;
2141 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2142 local_irq_restore(flags);
2143 }
2144
2145 void __netif_schedule(struct Qdisc *q)
2146 {
2147 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2148 __netif_reschedule(q);
2149 }
2150 EXPORT_SYMBOL(__netif_schedule);
2151
2152 struct dev_kfree_skb_cb {
2153 enum skb_free_reason reason;
2154 };
2155
2156 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2157 {
2158 return (struct dev_kfree_skb_cb *)skb->cb;
2159 }
2160
2161 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2162 {
2163 unsigned long flags;
2164
2165 if (likely(atomic_read(&skb->users) == 1)) {
2166 smp_rmb();
2167 atomic_set(&skb->users, 0);
2168 } else if (likely(!atomic_dec_and_test(&skb->users))) {
2169 return;
2170 }
2171 get_kfree_skb_cb(skb)->reason = reason;
2172 local_irq_save(flags);
2173 skb->next = __this_cpu_read(softnet_data.completion_queue);
2174 __this_cpu_write(softnet_data.completion_queue, skb);
2175 raise_softirq_irqoff(NET_TX_SOFTIRQ);
2176 local_irq_restore(flags);
2177 }
2178 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2179
2180 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2181 {
2182 if (in_irq() || irqs_disabled())
2183 __dev_kfree_skb_irq(skb, reason);
2184 else
2185 dev_kfree_skb(skb);
2186 }
2187 EXPORT_SYMBOL(__dev_kfree_skb_any);
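
/* Usage sketch (illustrative): a hypothetical TX-completion handler that may
 * run in hardirq or process context, so it frees skbs with the _any variants
 * built on the helpers above.
 */
static void example_tx_complete(struct sk_buff *skb, bool tx_ok)
{
	if (tx_ok)
		dev_consume_skb_any(skb);	/* normal completion */
	else
		dev_kfree_skb_any(skb);		/* accounted as a drop */
}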
2188
2189
2190 /**
2191 * netif_device_detach - mark device as removed
2192 * @dev: network device
2193 *
2194 * Mark device as removed from system and therefore no longer available.
2195 */
2196 void netif_device_detach(struct net_device *dev)
2197 {
2198 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2199 netif_running(dev)) {
2200 netif_tx_stop_all_queues(dev);
2201 }
2202 }
2203 EXPORT_SYMBOL(netif_device_detach);
2204
2205 /**
2206 * netif_device_attach - mark device as attached
2207 * @dev: network device
2208 *
2209 * Mark device as attached to the system and restart if needed.
2210 */
2211 void netif_device_attach(struct net_device *dev)
2212 {
2213 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2214 netif_running(dev)) {
2215 netif_tx_wake_all_queues(dev);
2216 __netdev_watchdog_up(dev);
2217 }
2218 }
2219 EXPORT_SYMBOL(netif_device_attach);
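
/* Usage sketch (illustrative): hypothetical suspend/resume callbacks pairing
 * netif_device_detach() with netif_device_attach() so the stack stops
 * queueing packets while the hardware is powered down.
 */
static int example_suspend(struct net_device *dev)
{
	netif_device_detach(dev);
	/* ... put the hardware to sleep here ... */
	return 0;
}

static int example_resume(struct net_device *dev)
{
	/* ... bring the hardware back up here ... */
	netif_device_attach(dev);
	return 0;
}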
2220
2221 static void skb_warn_bad_offload(const struct sk_buff *skb)
2222 {
2223 static const netdev_features_t null_features = 0;
2224 struct net_device *dev = skb->dev;
2225 const char *driver = "";
2226
2227 if (!net_ratelimit())
2228 return;
2229
2230 if (dev && dev->dev.parent)
2231 driver = dev_driver_string(dev->dev.parent);
2232
2233 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2234 "gso_type=%d ip_summed=%d\n",
2235 driver, dev ? &dev->features : &null_features,
2236 skb->sk ? &skb->sk->sk_route_caps : &null_features,
2237 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2238 skb_shinfo(skb)->gso_type, skb->ip_summed);
2239 }
2240
2241 /*
2242 * Invalidate hardware checksum when packet is to be mangled, and
2243 * complete checksum manually on outgoing path.
2244 */
2245 int skb_checksum_help(struct sk_buff *skb)
2246 {
2247 __wsum csum;
2248 int ret = 0, offset;
2249
2250 if (skb->ip_summed == CHECKSUM_COMPLETE)
2251 goto out_set_summed;
2252
2253 if (unlikely(skb_shinfo(skb)->gso_size)) {
2254 skb_warn_bad_offload(skb);
2255 return -EINVAL;
2256 }
2257
2258 /* Before computing a checksum, we should make sure no frag could
2259 * be modified by an external entity: the checksum could end up wrong.
2260 */
2261 if (skb_has_shared_frag(skb)) {
2262 ret = __skb_linearize(skb);
2263 if (ret)
2264 goto out;
2265 }
2266
2267 offset = skb_checksum_start_offset(skb);
2268 BUG_ON(offset >= skb_headlen(skb));
2269 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2270
2271 offset += skb->csum_offset;
2272 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2273
2274 if (skb_cloned(skb) &&
2275 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2276 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2277 if (ret)
2278 goto out;
2279 }
2280
2281 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2282 out_set_summed:
2283 skb->ip_summed = CHECKSUM_NONE;
2284 out:
2285 return ret;
2286 }
2287 EXPORT_SYMBOL(skb_checksum_help);
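
/* Usage sketch (illustrative): a hypothetical ndo_start_xmit fragment falling
 * back to software checksumming when the device offers no suitable offload.
 * A real driver would make a finer, per-protocol decision.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !(dev->features & NETIF_F_ALL_CSUM) &&
	    skb_checksum_help(skb))
		goto drop;

	/* ... hand the skb to the hardware ring here ... */
	return NETDEV_TX_OK;
drop:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}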
2288
2289 __be16 skb_network_protocol(struct sk_buff *skb)
2290 {
2291 __be16 type = skb->protocol;
2292 int vlan_depth = ETH_HLEN;
2293
2294 /* Tunnel gso handlers can set protocol to ethernet. */
2295 if (type == htons(ETH_P_TEB)) {
2296 struct ethhdr *eth;
2297
2298 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2299 return 0;
2300
2301 eth = (struct ethhdr *)skb_mac_header(skb);
2302 type = eth->h_proto;
2303 }
2304
2305 while (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
2306 struct vlan_hdr *vh;
2307
2308 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2309 return 0;
2310
2311 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2312 type = vh->h_vlan_encapsulated_proto;
2313 vlan_depth += VLAN_HLEN;
2314 }
2315
2316 return type;
2317 }
2318
2319 /**
2320 * skb_mac_gso_segment - mac layer segmentation handler.
2321 * @skb: buffer to segment
2322 * @features: features for the output path (see dev->features)
2323 */
2324 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2325 netdev_features_t features)
2326 {
2327 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2328 struct packet_offload *ptype;
2329 __be16 type = skb_network_protocol(skb);
2330
2331 if (unlikely(!type))
2332 return ERR_PTR(-EINVAL);
2333
2334 __skb_pull(skb, skb->mac_len);
2335
2336 rcu_read_lock();
2337 list_for_each_entry_rcu(ptype, &offload_base, list) {
2338 if (ptype->type == type && ptype->callbacks.gso_segment) {
2339 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2340 int err;
2341
2342 err = ptype->callbacks.gso_send_check(skb);
2343 segs = ERR_PTR(err);
2344 if (err || skb_gso_ok(skb, features))
2345 break;
2346 __skb_push(skb, (skb->data -
2347 skb_network_header(skb)));
2348 }
2349 segs = ptype->callbacks.gso_segment(skb, features);
2350 break;
2351 }
2352 }
2353 rcu_read_unlock();
2354
2355 __skb_push(skb, skb->data - skb_mac_header(skb));
2356
2357 return segs;
2358 }
2359 EXPORT_SYMBOL(skb_mac_gso_segment);
2360
2361
2362 /* openvswitch calls this on rx path, so we need a different check.
2363 */
2364 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2365 {
2366 if (tx_path)
2367 return skb->ip_summed != CHECKSUM_PARTIAL;
2368 else
2369 return skb->ip_summed == CHECKSUM_NONE;
2370 }
2371
2372 /**
2373 * __skb_gso_segment - Perform segmentation on skb.
2374 * @skb: buffer to segment
2375 * @features: features for the output path (see dev->features)
2376 * @tx_path: whether it is called in TX path
2377 *
2378 * This function segments the given skb and returns a list of segments.
2379 *
2380 * It may return NULL if the skb requires no segmentation. This is
2381 * only possible when GSO is used for verifying header integrity.
2382 */
2383 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2384 netdev_features_t features, bool tx_path)
2385 {
2386 if (unlikely(skb_needs_check(skb, tx_path))) {
2387 int err;
2388
2389 skb_warn_bad_offload(skb);
2390
2391 if (skb_header_cloned(skb) &&
2392 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
2393 return ERR_PTR(err);
2394 }
2395
2396 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2397 SKB_GSO_CB(skb)->encap_level = 0;
2398
2399 skb_reset_mac_header(skb);
2400 skb_reset_mac_len(skb);
2401
2402 return skb_mac_gso_segment(skb, features);
2403 }
2404 EXPORT_SYMBOL(__skb_gso_segment);
2405
2406 /* Take action when hardware reception checksum errors are detected. */
2407 #ifdef CONFIG_BUG
2408 void netdev_rx_csum_fault(struct net_device *dev)
2409 {
2410 if (net_ratelimit()) {
2411 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2412 dump_stack();
2413 }
2414 }
2415 EXPORT_SYMBOL(netdev_rx_csum_fault);
2416 #endif
2417
2418 /* Actually, we should eliminate this check as soon as we know that:
2419 * 1. An IOMMU is present and allows us to map all the memory.
2420 * 2. No high memory really exists on this machine.
2421 */
2422
2423 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2424 {
2425 #ifdef CONFIG_HIGHMEM
2426 int i;
2427 if (!(dev->features & NETIF_F_HIGHDMA)) {
2428 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2429 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2430 if (PageHighMem(skb_frag_page(frag)))
2431 return 1;
2432 }
2433 }
2434
2435 if (PCI_DMA_BUS_IS_PHYS) {
2436 struct device *pdev = dev->dev.parent;
2437
2438 if (!pdev)
2439 return 0;
2440 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2441 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2442 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2443 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2444 return 1;
2445 }
2446 }
2447 #endif
2448 return 0;
2449 }
2450
2451 struct dev_gso_cb {
2452 void (*destructor)(struct sk_buff *skb);
2453 };
2454
2455 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2456
2457 static void dev_gso_skb_destructor(struct sk_buff *skb)
2458 {
2459 struct dev_gso_cb *cb;
2460
2461 kfree_skb_list(skb->next);
2462 skb->next = NULL;
2463
2464 cb = DEV_GSO_CB(skb);
2465 if (cb->destructor)
2466 cb->destructor(skb);
2467 }
2468
2469 /**
2470 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2471 * @skb: buffer to segment
2472 * @features: device features as applicable to this skb
2473 *
2474 * This function segments the given skb and stores the list of segments
2475 * in skb->next.
2476 */
2477 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2478 {
2479 struct sk_buff *segs;
2480
2481 segs = skb_gso_segment(skb, features);
2482
2483 /* Verifying header integrity only. */
2484 if (!segs)
2485 return 0;
2486
2487 if (IS_ERR(segs))
2488 return PTR_ERR(segs);
2489
2490 skb->next = segs;
2491 DEV_GSO_CB(skb)->destructor = skb->destructor;
2492 skb->destructor = dev_gso_skb_destructor;
2493
2494 return 0;
2495 }
2496
2497 static netdev_features_t harmonize_features(struct sk_buff *skb,
2498 netdev_features_t features)
2499 {
2500 if (skb->ip_summed != CHECKSUM_NONE &&
2501 !can_checksum_protocol(features, skb_network_protocol(skb))) {
2502 features &= ~NETIF_F_ALL_CSUM;
2503 } else if (illegal_highdma(skb->dev, skb)) {
2504 features &= ~NETIF_F_SG;
2505 }
2506
2507 return features;
2508 }
2509
2510 netdev_features_t netif_skb_features(struct sk_buff *skb)
2511 {
2512 __be16 protocol = skb->protocol;
2513 netdev_features_t features = skb->dev->features;
2514
2515 if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
2516 features &= ~NETIF_F_GSO_MASK;
2517
2518 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
2519 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2520 protocol = veh->h_vlan_encapsulated_proto;
2521 } else if (!vlan_tx_tag_present(skb)) {
2522 return harmonize_features(skb, features);
2523 }
2524
2525 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
2526 NETIF_F_HW_VLAN_STAG_TX);
2527
2528 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
2529 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2530 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
2531 NETIF_F_HW_VLAN_STAG_TX;
2532
2533 return harmonize_features(skb, features);
2534 }
2535 EXPORT_SYMBOL(netif_skb_features);
2536
2537 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2538 struct netdev_queue *txq)
2539 {
2540 const struct net_device_ops *ops = dev->netdev_ops;
2541 int rc = NETDEV_TX_OK;
2542 unsigned int skb_len;
2543
2544 if (likely(!skb->next)) {
2545 netdev_features_t features;
2546
2547 /*
2548 * If device doesn't need skb->dst, release it right now while
2549 * it's hot in this CPU's cache
2550 */
2551 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2552 skb_dst_drop(skb);
2553
2554 features = netif_skb_features(skb);
2555
2556 if (vlan_tx_tag_present(skb) &&
2557 !vlan_hw_offload_capable(features, skb->vlan_proto)) {
2558 skb = __vlan_put_tag(skb, skb->vlan_proto,
2559 vlan_tx_tag_get(skb));
2560 if (unlikely(!skb))
2561 goto out;
2562
2563 skb->vlan_tci = 0;
2564 }
2565
2566 /* If encapsulation offload request, verify we are testing
2567 * hardware encapsulation features instead of standard
2568 * features for the netdev
2569 */
2570 if (skb->encapsulation)
2571 features &= dev->hw_enc_features;
2572
2573 if (netif_needs_gso(skb, features)) {
2574 if (unlikely(dev_gso_segment(skb, features)))
2575 goto out_kfree_skb;
2576 if (skb->next)
2577 goto gso;
2578 } else {
2579 if (skb_needs_linearize(skb, features) &&
2580 __skb_linearize(skb))
2581 goto out_kfree_skb;
2582
2583 /* If packet is not checksummed and device does not
2584 * support checksumming for this protocol, complete
2585 * checksumming here.
2586 */
2587 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2588 if (skb->encapsulation)
2589 skb_set_inner_transport_header(skb,
2590 skb_checksum_start_offset(skb));
2591 else
2592 skb_set_transport_header(skb,
2593 skb_checksum_start_offset(skb));
2594 if (!(features & NETIF_F_ALL_CSUM) &&
2595 skb_checksum_help(skb))
2596 goto out_kfree_skb;
2597 }
2598 }
2599
2600 if (!list_empty(&ptype_all))
2601 dev_queue_xmit_nit(skb, dev);
2602
2603 skb_len = skb->len;
2604 trace_net_dev_start_xmit(skb, dev);
2605 rc = ops->ndo_start_xmit(skb, dev);
2606 trace_net_dev_xmit(skb, rc, dev, skb_len);
2607 if (rc == NETDEV_TX_OK)
2608 txq_trans_update(txq);
2609 return rc;
2610 }
2611
2612 gso:
2613 do {
2614 struct sk_buff *nskb = skb->next;
2615
2616 skb->next = nskb->next;
2617 nskb->next = NULL;
2618
2619 if (!list_empty(&ptype_all))
2620 dev_queue_xmit_nit(nskb, dev);
2621
2622 skb_len = nskb->len;
2623 trace_net_dev_start_xmit(nskb, dev);
2624 rc = ops->ndo_start_xmit(nskb, dev);
2625 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2626 if (unlikely(rc != NETDEV_TX_OK)) {
2627 if (rc & ~NETDEV_TX_MASK)
2628 goto out_kfree_gso_skb;
2629 nskb->next = skb->next;
2630 skb->next = nskb;
2631 return rc;
2632 }
2633 txq_trans_update(txq);
2634 if (unlikely(netif_xmit_stopped(txq) && skb->next))
2635 return NETDEV_TX_BUSY;
2636 } while (skb->next);
2637
2638 out_kfree_gso_skb:
2639 if (likely(skb->next == NULL)) {
2640 skb->destructor = DEV_GSO_CB(skb)->destructor;
2641 consume_skb(skb);
2642 return rc;
2643 }
2644 out_kfree_skb:
2645 kfree_skb(skb);
2646 out:
2647 return rc;
2648 }
2649 EXPORT_SYMBOL_GPL(dev_hard_start_xmit);
2650
2651 static void qdisc_pkt_len_init(struct sk_buff *skb)
2652 {
2653 const struct skb_shared_info *shinfo = skb_shinfo(skb);
2654
2655 qdisc_skb_cb(skb)->pkt_len = skb->len;
2656
2657 /* To get more precise estimation of bytes sent on wire,
2658 * we add to pkt_len the headers size of all segments
2659 */
2660 if (shinfo->gso_size) {
2661 unsigned int hdr_len;
2662 u16 gso_segs = shinfo->gso_segs;
2663
2664 /* mac layer + network layer */
2665 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2666
2667 /* + transport layer */
2668 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2669 hdr_len += tcp_hdrlen(skb);
2670 else
2671 hdr_len += sizeof(struct udphdr);
2672
2673 if (shinfo->gso_type & SKB_GSO_DODGY)
2674 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2675 shinfo->gso_size);
2676
2677 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
2678 }
2679 }
2680
2681 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2682 struct net_device *dev,
2683 struct netdev_queue *txq)
2684 {
2685 spinlock_t *root_lock = qdisc_lock(q);
2686 bool contended;
2687 int rc;
2688
2689 qdisc_pkt_len_init(skb);
2690 qdisc_calculate_pkt_len(skb, q);
2691 /*
2692 * Heuristic to force contended enqueues to serialize on a
2693 * separate lock before trying to get qdisc main lock.
2694 * This permits __QDISC_STATE_RUNNING owner to get the lock more often
2695 * and dequeue packets faster.
2696 */
2697 contended = qdisc_is_running(q);
2698 if (unlikely(contended))
2699 spin_lock(&q->busylock);
2700
2701 spin_lock(root_lock);
2702 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2703 kfree_skb(skb);
2704 rc = NET_XMIT_DROP;
2705 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2706 qdisc_run_begin(q)) {
2707 /*
2708 * This is a work-conserving queue; there are no old skbs
2709 * waiting to be sent out; and the qdisc is not running -
2710 * xmit the skb directly.
2711 */
2712 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2713 skb_dst_force(skb);
2714
2715 qdisc_bstats_update(q, skb);
2716
2717 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2718 if (unlikely(contended)) {
2719 spin_unlock(&q->busylock);
2720 contended = false;
2721 }
2722 __qdisc_run(q);
2723 } else
2724 qdisc_run_end(q);
2725
2726 rc = NET_XMIT_SUCCESS;
2727 } else {
2728 skb_dst_force(skb);
2729 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2730 if (qdisc_run_begin(q)) {
2731 if (unlikely(contended)) {
2732 spin_unlock(&q->busylock);
2733 contended = false;
2734 }
2735 __qdisc_run(q);
2736 }
2737 }
2738 spin_unlock(root_lock);
2739 if (unlikely(contended))
2740 spin_unlock(&q->busylock);
2741 return rc;
2742 }
2743
2744 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2745 static void skb_update_prio(struct sk_buff *skb)
2746 {
2747 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2748
2749 if (!skb->priority && skb->sk && map) {
2750 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2751
2752 if (prioidx < map->priomap_len)
2753 skb->priority = map->priomap[prioidx];
2754 }
2755 }
2756 #else
2757 #define skb_update_prio(skb)
2758 #endif
2759
2760 static DEFINE_PER_CPU(int, xmit_recursion);
2761 #define RECURSION_LIMIT 10
2762
2763 /**
2764 * dev_loopback_xmit - loop back @skb
2765 * @skb: buffer to transmit
2766 */
2767 int dev_loopback_xmit(struct sk_buff *skb)
2768 {
2769 skb_reset_mac_header(skb);
2770 __skb_pull(skb, skb_network_offset(skb));
2771 skb->pkt_type = PACKET_LOOPBACK;
2772 skb->ip_summed = CHECKSUM_UNNECESSARY;
2773 WARN_ON(!skb_dst(skb));
2774 skb_dst_force(skb);
2775 netif_rx_ni(skb);
2776 return 0;
2777 }
2778 EXPORT_SYMBOL(dev_loopback_xmit);
2779
2780 /**
2781 * __dev_queue_xmit - transmit a buffer
2782 * @skb: buffer to transmit
2783 * @accel_priv: private data used for L2 forwarding offload
2784 *
2785 * Queue a buffer for transmission to a network device. The caller must
2786 * have set the device and priority and built the buffer before calling
2787 * this function. The function can be called from an interrupt.
2788 *
2789 * A negative errno code is returned on a failure. A success does not
2790 * guarantee the frame will be transmitted as it may be dropped due
2791 * to congestion or traffic shaping.
2792 *
2793 * -----------------------------------------------------------------------------------
2794 * I notice this method can also return errors from the queue disciplines,
2795 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2796 * be positive.
2797 *
2798 * Regardless of the return value, the skb is consumed, so it is currently
2799 * difficult to retry a send to this method. (You can bump the ref count
2800 * before sending to hold a reference for retry if you are careful.)
2801 *
2802 * When calling this method, interrupts MUST be enabled. This is because
2803 * the BH enable code must have IRQs enabled so that it will not deadlock.
2804 * --BLG
2805 */
2806 int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2807 {
2808 struct net_device *dev = skb->dev;
2809 struct netdev_queue *txq;
2810 struct Qdisc *q;
2811 int rc = -ENOMEM;
2812
2813 skb_reset_mac_header(skb);
2814
2815 /* Disable soft irqs for various locks below. Also
2816 * stops preemption for RCU.
2817 */
2818 rcu_read_lock_bh();
2819
2820 skb_update_prio(skb);
2821
2822 txq = netdev_pick_tx(dev, skb, accel_priv);
2823 q = rcu_dereference_bh(txq->qdisc);
2824
2825 #ifdef CONFIG_NET_CLS_ACT
2826 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2827 #endif
2828 trace_net_dev_queue(skb);
2829 if (q->enqueue) {
2830 rc = __dev_xmit_skb(skb, q, dev, txq);
2831 goto out;
2832 }
2833
2834 /* The device has no queue. Common case for software devices:
2835 loopback, all the sorts of tunnels...
2836
2837 Really, it is unlikely that netif_tx_lock protection is necessary
2838 here. (f.e. loopback and IP tunnels are clean ignoring statistics
2839 counters.)
2840 However, it is possible that they rely on the protection
2841 made by us here.
2842
2843 Check this and take the lock. It is not prone to deadlocks.
2844 Or go for the noqueue qdisc, which is even simpler 8)
2845 */
2846 if (dev->flags & IFF_UP) {
2847 int cpu = smp_processor_id(); /* ok because BHs are off */
2848
2849 if (txq->xmit_lock_owner != cpu) {
2850
2851 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2852 goto recursion_alert;
2853
2854 HARD_TX_LOCK(dev, txq, cpu);
2855
2856 if (!netif_xmit_stopped(txq)) {
2857 __this_cpu_inc(xmit_recursion);
2858 rc = dev_hard_start_xmit(skb, dev, txq);
2859 __this_cpu_dec(xmit_recursion);
2860 if (dev_xmit_complete(rc)) {
2861 HARD_TX_UNLOCK(dev, txq);
2862 goto out;
2863 }
2864 }
2865 HARD_TX_UNLOCK(dev, txq);
2866 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2867 dev->name);
2868 } else {
2869 /* Recursion is detected! It is possible,
2870 * unfortunately
2871 */
2872 recursion_alert:
2873 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2874 dev->name);
2875 }
2876 }
2877
2878 rc = -ENETDOWN;
2879 rcu_read_unlock_bh();
2880
2881 kfree_skb(skb);
2882 return rc;
2883 out:
2884 rcu_read_unlock_bh();
2885 return rc;
2886 }
2887
2888 int dev_queue_xmit(struct sk_buff *skb)
2889 {
2890 return __dev_queue_xmit(skb, NULL);
2891 }
2892 EXPORT_SYMBOL(dev_queue_xmit);
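
/* Usage sketch (illustrative): a hypothetical tunnel or virtual device that
 * has built a fully framed skb and hands it to the stack for transmission.
 * Note the positive NET_XMIT_* return codes discussed in the comment above.
 */
static int example_tunnel_xmit(struct sk_buff *skb, struct net_device *lower)
{
	skb->dev = lower;
	return dev_queue_xmit(skb);	/* skb is consumed either way */
}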
2893
2894 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
2895 {
2896 return __dev_queue_xmit(skb, accel_priv);
2897 }
2898 EXPORT_SYMBOL(dev_queue_xmit_accel);
2899
2900
2901 /*=======================================================================
2902 Receiver routines
2903 =======================================================================*/
2904
2905 int netdev_max_backlog __read_mostly = 1000;
2906 EXPORT_SYMBOL(netdev_max_backlog);
2907
2908 int netdev_tstamp_prequeue __read_mostly = 1;
2909 int netdev_budget __read_mostly = 300;
2910 int weight_p __read_mostly = 64; /* old backlog weight */
2911
2912 /* Called with irq disabled */
2913 static inline void ____napi_schedule(struct softnet_data *sd,
2914 struct napi_struct *napi)
2915 {
2916 list_add_tail(&napi->poll_list, &sd->poll_list);
2917 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2918 }
2919
2920 #ifdef CONFIG_RPS
2921
2922 /* One global table that all flow-based protocols share. */
2923 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2924 EXPORT_SYMBOL(rps_sock_flow_table);
2925
2926 struct static_key rps_needed __read_mostly;
2927
2928 static struct rps_dev_flow *
2929 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2930 struct rps_dev_flow *rflow, u16 next_cpu)
2931 {
2932 if (next_cpu != RPS_NO_CPU) {
2933 #ifdef CONFIG_RFS_ACCEL
2934 struct netdev_rx_queue *rxqueue;
2935 struct rps_dev_flow_table *flow_table;
2936 struct rps_dev_flow *old_rflow;
2937 u32 flow_id;
2938 u16 rxq_index;
2939 int rc;
2940
2941 /* Should we steer this flow to a different hardware queue? */
2942 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2943 !(dev->features & NETIF_F_NTUPLE))
2944 goto out;
2945 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2946 if (rxq_index == skb_get_rx_queue(skb))
2947 goto out;
2948
2949 rxqueue = dev->_rx + rxq_index;
2950 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2951 if (!flow_table)
2952 goto out;
2953 flow_id = skb->rxhash & flow_table->mask;
2954 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2955 rxq_index, flow_id);
2956 if (rc < 0)
2957 goto out;
2958 old_rflow = rflow;
2959 rflow = &flow_table->flows[flow_id];
2960 rflow->filter = rc;
2961 if (old_rflow->filter == rflow->filter)
2962 old_rflow->filter = RPS_NO_FILTER;
2963 out:
2964 #endif
2965 rflow->last_qtail =
2966 per_cpu(softnet_data, next_cpu).input_queue_head;
2967 }
2968
2969 rflow->cpu = next_cpu;
2970 return rflow;
2971 }
2972
2973 /*
2974 * get_rps_cpu is called from netif_receive_skb and returns the target
2975 * CPU from the RPS map of the receiving queue for a given skb.
2976 * rcu_read_lock must be held on entry.
2977 */
2978 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2979 struct rps_dev_flow **rflowp)
2980 {
2981 struct netdev_rx_queue *rxqueue;
2982 struct rps_map *map;
2983 struct rps_dev_flow_table *flow_table;
2984 struct rps_sock_flow_table *sock_flow_table;
2985 int cpu = -1;
2986 u16 tcpu;
2987
2988 if (skb_rx_queue_recorded(skb)) {
2989 u16 index = skb_get_rx_queue(skb);
2990 if (unlikely(index >= dev->real_num_rx_queues)) {
2991 WARN_ONCE(dev->real_num_rx_queues > 1,
2992 "%s received packet on queue %u, but number "
2993 "of RX queues is %u\n",
2994 dev->name, index, dev->real_num_rx_queues);
2995 goto done;
2996 }
2997 rxqueue = dev->_rx + index;
2998 } else
2999 rxqueue = dev->_rx;
3000
3001 map = rcu_dereference(rxqueue->rps_map);
3002 if (map) {
3003 if (map->len == 1 &&
3004 !rcu_access_pointer(rxqueue->rps_flow_table)) {
3005 tcpu = map->cpus[0];
3006 if (cpu_online(tcpu))
3007 cpu = tcpu;
3008 goto done;
3009 }
3010 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
3011 goto done;
3012 }
3013
3014 skb_reset_network_header(skb);
3015 if (!skb_get_hash(skb))
3016 goto done;
3017
3018 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3019 sock_flow_table = rcu_dereference(rps_sock_flow_table);
3020 if (flow_table && sock_flow_table) {
3021 u16 next_cpu;
3022 struct rps_dev_flow *rflow;
3023
3024 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
3025 tcpu = rflow->cpu;
3026
3027 next_cpu = sock_flow_table->ents[skb->rxhash &
3028 sock_flow_table->mask];
3029
3030 /*
3031 * If the desired CPU (where last recvmsg was done) is
3032 * different from current CPU (one in the rx-queue flow
3033 * table entry), switch if one of the following holds:
3034 * - Current CPU is unset (equal to RPS_NO_CPU).
3035 * - Current CPU is offline.
3036 * - The current CPU's queue tail has advanced beyond the
3037 * last packet that was enqueued using this table entry.
3038 * This guarantees that all previous packets for the flow
3039 * have been dequeued, thus preserving in-order delivery.
3040 */
3041 if (unlikely(tcpu != next_cpu) &&
3042 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
3043 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3044 rflow->last_qtail)) >= 0)) {
3045 tcpu = next_cpu;
3046 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3047 }
3048
3049 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
3050 *rflowp = rflow;
3051 cpu = tcpu;
3052 goto done;
3053 }
3054 }
3055
3056 if (map) {
3057 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
3058
3059 if (cpu_online(tcpu)) {
3060 cpu = tcpu;
3061 goto done;
3062 }
3063 }
3064
3065 done:
3066 return cpu;
3067 }
3068
3069 #ifdef CONFIG_RFS_ACCEL
3070
3071 /**
3072 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3073 * @dev: Device on which the filter was set
3074 * @rxq_index: RX queue index
3075 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3076 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3077 *
3078 * Drivers that implement ndo_rx_flow_steer() should periodically call
3079 * this function for each installed filter and remove the filters for
3080 * which it returns %true.
3081 */
3082 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3083 u32 flow_id, u16 filter_id)
3084 {
3085 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3086 struct rps_dev_flow_table *flow_table;
3087 struct rps_dev_flow *rflow;
3088 bool expire = true;
3089 int cpu;
3090
3091 rcu_read_lock();
3092 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3093 if (flow_table && flow_id <= flow_table->mask) {
3094 rflow = &flow_table->flows[flow_id];
3095 cpu = ACCESS_ONCE(rflow->cpu);
3096 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
3097 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3098 rflow->last_qtail) <
3099 (int)(10 * flow_table->mask)))
3100 expire = false;
3101 }
3102 rcu_read_unlock();
3103 return expire;
3104 }
3105 EXPORT_SYMBOL(rps_may_expire_flow);
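
/* Usage sketch (illustrative): a hypothetical driver's periodic scan over its
 * installed aRFS filters, marking the ones the stack says may expire so the
 * caller can remove them from hardware. The array layout is an assumption.
 */
static void example_scan_filters(struct net_device *dev, u16 rxq_index,
				 const u32 *flow_ids, const u16 *filter_ids,
				 unsigned long *expired, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (rps_may_expire_flow(dev, rxq_index,
					flow_ids[i], filter_ids[i]))
			set_bit(i, expired);	/* caller removes these */
	}
}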
3106
3107 #endif /* CONFIG_RFS_ACCEL */
3108
3109 /* Called from hardirq (IPI) context */
3110 static void rps_trigger_softirq(void *data)
3111 {
3112 struct softnet_data *sd = data;
3113
3114 ____napi_schedule(sd, &sd->backlog);
3115 sd->received_rps++;
3116 }
3117
3118 #endif /* CONFIG_RPS */
3119
3120 /*
3121 * Check if this softnet_data structure belongs to another CPU.
3122 * If yes, queue it to our IPI list and return 1
3123 * If no, return 0
3124 */
3125 static int rps_ipi_queued(struct softnet_data *sd)
3126 {
3127 #ifdef CONFIG_RPS
3128 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
3129
3130 if (sd != mysd) {
3131 sd->rps_ipi_next = mysd->rps_ipi_list;
3132 mysd->rps_ipi_list = sd;
3133
3134 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3135 return 1;
3136 }
3137 #endif /* CONFIG_RPS */
3138 return 0;
3139 }
3140
3141 #ifdef CONFIG_NET_FLOW_LIMIT
3142 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3143 #endif
3144
3145 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3146 {
3147 #ifdef CONFIG_NET_FLOW_LIMIT
3148 struct sd_flow_limit *fl;
3149 struct softnet_data *sd;
3150 unsigned int old_flow, new_flow;
3151
3152 if (qlen < (netdev_max_backlog >> 1))
3153 return false;
3154
3155 sd = &__get_cpu_var(softnet_data);
3156
3157 rcu_read_lock();
3158 fl = rcu_dereference(sd->flow_limit);
3159 if (fl) {
3160 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3161 old_flow = fl->history[fl->history_head];
3162 fl->history[fl->history_head] = new_flow;
3163
3164 fl->history_head++;
3165 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3166
3167 if (likely(fl->buckets[old_flow]))
3168 fl->buckets[old_flow]--;
3169
3170 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3171 fl->count++;
3172 rcu_read_unlock();
3173 return true;
3174 }
3175 }
3176 rcu_read_unlock();
3177 #endif
3178 return false;
3179 }
3180
3181 /*
3182 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3183 * queue (may be a remote CPU queue).
3184 */
3185 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3186 unsigned int *qtail)
3187 {
3188 struct softnet_data *sd;
3189 unsigned long flags;
3190 unsigned int qlen;
3191
3192 sd = &per_cpu(softnet_data, cpu);
3193
3194 local_irq_save(flags);
3195
3196 rps_lock(sd);
3197 qlen = skb_queue_len(&sd->input_pkt_queue);
3198 if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3199 if (skb_queue_len(&sd->input_pkt_queue)) {
3200 enqueue:
3201 __skb_queue_tail(&sd->input_pkt_queue, skb);
3202 input_queue_tail_incr_save(sd, qtail);
3203 rps_unlock(sd);
3204 local_irq_restore(flags);
3205 return NET_RX_SUCCESS;
3206 }
3207
3208 /* Schedule NAPI for backlog device
3209 * We can use a non-atomic operation since we own the queue lock
3210 */
3211 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3212 if (!rps_ipi_queued(sd))
3213 ____napi_schedule(sd, &sd->backlog);
3214 }
3215 goto enqueue;
3216 }
3217
3218 sd->dropped++;
3219 rps_unlock(sd);
3220
3221 local_irq_restore(flags);
3222
3223 atomic_long_inc(&skb->dev->rx_dropped);
3224 kfree_skb(skb);
3225 return NET_RX_DROP;
3226 }
3227
3228 static int netif_rx_internal(struct sk_buff *skb)
3229 {
3230 int ret;
3231
3232 /* if netpoll wants it, pretend we never saw it */
3233 if (netpoll_rx(skb))
3234 return NET_RX_DROP;
3235
3236 net_timestamp_check(netdev_tstamp_prequeue, skb);
3237
3238 trace_netif_rx(skb);
3239 #ifdef CONFIG_RPS
3240 if (static_key_false(&rps_needed)) {
3241 struct rps_dev_flow voidflow, *rflow = &voidflow;
3242 int cpu;
3243
3244 preempt_disable();
3245 rcu_read_lock();
3246
3247 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3248 if (cpu < 0)
3249 cpu = smp_processor_id();
3250
3251 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3252
3253 rcu_read_unlock();
3254 preempt_enable();
3255 } else
3256 #endif
3257 {
3258 unsigned int qtail;
3259 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3260 put_cpu();
3261 }
3262 return ret;
3263 }
3264
3265 /**
3266 * netif_rx - post buffer to the network code
3267 * @skb: buffer to post
3268 *
3269 * This function receives a packet from a device driver and queues it for
3270 * the upper (protocol) levels to process. It always succeeds. The buffer
3271 * may be dropped during processing for congestion control or by the
3272 * protocol layers.
3273 *
3274 * return values:
3275 * NET_RX_SUCCESS (no congestion)
3276 * NET_RX_DROP (packet was dropped)
3277 *
3278 */
3279
3280 int netif_rx(struct sk_buff *skb)
3281 {
3282 trace_netif_rx_entry(skb);
3283
3284 return netif_rx_internal(skb);
3285 }
3286 EXPORT_SYMBOL(netif_rx);
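
/* Usage sketch (illustrative): a hypothetical legacy, non-NAPI driver posting
 * a received frame from its interrupt handler via netif_rx().
 */
static void example_legacy_rx(struct net_device *dev, const void *data, int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, len);

	if (!skb)
		return;				/* the driver counts this as a drop */
	memcpy(skb_put(skb, len), data, len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);				/* may still be dropped on congestion */
}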
3287
3288 int netif_rx_ni(struct sk_buff *skb)
3289 {
3290 int err;
3291
3292 trace_netif_rx_ni_entry(skb);
3293
3294 preempt_disable();
3295 err = netif_rx_internal(skb);
3296 if (local_softirq_pending())
3297 do_softirq();
3298 preempt_enable();
3299
3300 return err;
3301 }
3302 EXPORT_SYMBOL(netif_rx_ni);
3303
3304 static void net_tx_action(struct softirq_action *h)
3305 {
3306 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3307
3308 if (sd->completion_queue) {
3309 struct sk_buff *clist;
3310
3311 local_irq_disable();
3312 clist = sd->completion_queue;
3313 sd->completion_queue = NULL;
3314 local_irq_enable();
3315
3316 while (clist) {
3317 struct sk_buff *skb = clist;
3318 clist = clist->next;
3319
3320 WARN_ON(atomic_read(&skb->users));
3321 if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3322 trace_consume_skb(skb);
3323 else
3324 trace_kfree_skb(skb, net_tx_action);
3325 __kfree_skb(skb);
3326 }
3327 }
3328
3329 if (sd->output_queue) {
3330 struct Qdisc *head;
3331
3332 local_irq_disable();
3333 head = sd->output_queue;
3334 sd->output_queue = NULL;
3335 sd->output_queue_tailp = &sd->output_queue;
3336 local_irq_enable();
3337
3338 while (head) {
3339 struct Qdisc *q = head;
3340 spinlock_t *root_lock;
3341
3342 head = head->next_sched;
3343
3344 root_lock = qdisc_lock(q);
3345 if (spin_trylock(root_lock)) {
3346 smp_mb__before_clear_bit();
3347 clear_bit(__QDISC_STATE_SCHED,
3348 &q->state);
3349 qdisc_run(q);
3350 spin_unlock(root_lock);
3351 } else {
3352 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3353 &q->state)) {
3354 __netif_reschedule(q);
3355 } else {
3356 smp_mb__before_clear_bit();
3357 clear_bit(__QDISC_STATE_SCHED,
3358 &q->state);
3359 }
3360 }
3361 }
3362 }
3363 }
3364
3365 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3366 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3367 /* This hook is defined here for ATM LANE */
3368 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3369 unsigned char *addr) __read_mostly;
3370 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3371 #endif
3372
3373 #ifdef CONFIG_NET_CLS_ACT
3374 /* TODO: Maybe we should just force sch_ingress to be compiled in
3375 * when CONFIG_NET_CLS_ACT is? Otherwise we pay for some useless
3376 * instructions (a compare and two extra stores) right now if we don't
3377 * have it compiled in but do have CONFIG_NET_CLS_ACT.
3378 * NOTE: This doesn't stop any functionality; if you don't have
3379 * the ingress scheduler, you just can't add policies on ingress.
3380 *
3381 */
3382 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3383 {
3384 struct net_device *dev = skb->dev;
3385 u32 ttl = G_TC_RTTL(skb->tc_verd);
3386 int result = TC_ACT_OK;
3387 struct Qdisc *q;
3388
3389 if (unlikely(MAX_RED_LOOP < ttl++)) {
3390 net_warn_ratelimited("Redir loop detected Dropping packet (%d->%d)\n",
3391 skb->skb_iif, dev->ifindex);
3392 return TC_ACT_SHOT;
3393 }
3394
3395 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3396 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3397
3398 q = rxq->qdisc;
3399 if (q != &noop_qdisc) {
3400 spin_lock(qdisc_lock(q));
3401 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3402 result = qdisc_enqueue_root(skb, q);
3403 spin_unlock(qdisc_lock(q));
3404 }
3405
3406 return result;
3407 }
3408
3409 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3410 struct packet_type **pt_prev,
3411 int *ret, struct net_device *orig_dev)
3412 {
3413 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3414
3415 if (!rxq || rxq->qdisc == &noop_qdisc)
3416 goto out;
3417
3418 if (*pt_prev) {
3419 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3420 *pt_prev = NULL;
3421 }
3422
3423 switch (ing_filter(skb, rxq)) {
3424 case TC_ACT_SHOT:
3425 case TC_ACT_STOLEN:
3426 kfree_skb(skb);
3427 return NULL;
3428 }
3429
3430 out:
3431 skb->tc_verd = 0;
3432 return skb;
3433 }
3434 #endif
3435
3436 /**
3437 * netdev_rx_handler_register - register receive handler
3438 * @dev: device to register a handler for
3439 * @rx_handler: receive handler to register
3440 * @rx_handler_data: data pointer that is used by rx handler
3441 *
3442 * Register a receive handler for a device. This handler will then be
3443 * called from __netif_receive_skb. A negative errno code is returned
3444 * on a failure.
3445 *
3446 * The caller must hold the rtnl_mutex.
3447 *
3448 * For a general description of rx_handler, see enum rx_handler_result.
3449 */
3450 int netdev_rx_handler_register(struct net_device *dev,
3451 rx_handler_func_t *rx_handler,
3452 void *rx_handler_data)
3453 {
3454 ASSERT_RTNL();
3455
3456 if (dev->rx_handler)
3457 return -EBUSY;
3458
3459 /* Note: rx_handler_data must be set before rx_handler */
3460 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3461 rcu_assign_pointer(dev->rx_handler, rx_handler);
3462
3463 return 0;
3464 }
3465 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
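
/* Usage sketch (illustrative): how a hypothetical bridging-style module could
 * claim a port's receive path. example_handle_frame and example_port_attach
 * are assumed names; real bridge/bonding/macvlan code differs in detail.
 */
static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	void *port = rcu_dereference(skb->dev->rx_handler_data);

	if (!port)
		return RX_HANDLER_PASS;
	/* ... steal, redirect or consume the skb here ... */
	return RX_HANDLER_PASS;
}

static int example_port_attach(struct net_device *dev, void *port)
{
	int err;

	rtnl_lock();
	err = netdev_rx_handler_register(dev, example_handle_frame, port);
	rtnl_unlock();
	return err;
}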
3466
3467 /**
3468 * netdev_rx_handler_unregister - unregister receive handler
3469 * @dev: device to unregister a handler from
3470 *
3471 * Unregister a receive handler from a device.
3472 *
3473 * The caller must hold the rtnl_mutex.
3474 */
3475 void netdev_rx_handler_unregister(struct net_device *dev)
3476 {
3477
3478 ASSERT_RTNL();
3479 RCU_INIT_POINTER(dev->rx_handler, NULL);
3480 /* a reader seeing a non-NULL rx_handler in an rcu_read_lock()
3481 * section is guaranteed to see a non-NULL rx_handler_data
3482 * as well.
3483 */
3484 synchronize_net();
3485 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3486 }
3487 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3488
3489 /*
3490 * Limit the use of PFMEMALLOC reserves to those protocols that implement
3491 * the special handling of PFMEMALLOC skbs.
3492 */
3493 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3494 {
3495 switch (skb->protocol) {
3496 case __constant_htons(ETH_P_ARP):
3497 case __constant_htons(ETH_P_IP):
3498 case __constant_htons(ETH_P_IPV6):
3499 case __constant_htons(ETH_P_8021Q):
3500 case __constant_htons(ETH_P_8021AD):
3501 return true;
3502 default:
3503 return false;
3504 }
3505 }
3506
3507 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3508 {
3509 struct packet_type *ptype, *pt_prev;
3510 rx_handler_func_t *rx_handler;
3511 struct net_device *orig_dev;
3512 struct net_device *null_or_dev;
3513 bool deliver_exact = false;
3514 int ret = NET_RX_DROP;
3515 __be16 type;
3516
3517 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3518
3519 trace_netif_receive_skb(skb);
3520
3521 /* if we've gotten here through NAPI, check netpoll */
3522 if (netpoll_receive_skb(skb))
3523 goto out;
3524
3525 orig_dev = skb->dev;
3526
3527 skb_reset_network_header(skb);
3528 if (!skb_transport_header_was_set(skb))
3529 skb_reset_transport_header(skb);
3530 skb_reset_mac_len(skb);
3531
3532 pt_prev = NULL;
3533
3534 rcu_read_lock();
3535
3536 another_round:
3537 skb->skb_iif = skb->dev->ifindex;
3538
3539 __this_cpu_inc(softnet_data.processed);
3540
3541 if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3542 skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3543 skb = vlan_untag(skb);
3544 if (unlikely(!skb))
3545 goto unlock;
3546 }
3547
3548 #ifdef CONFIG_NET_CLS_ACT
3549 if (skb->tc_verd & TC_NCLS) {
3550 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3551 goto ncls;
3552 }
3553 #endif
3554
3555 if (pfmemalloc)
3556 goto skip_taps;
3557
3558 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3559 if (!ptype->dev || ptype->dev == skb->dev) {
3560 if (pt_prev)
3561 ret = deliver_skb(skb, pt_prev, orig_dev);
3562 pt_prev = ptype;
3563 }
3564 }
3565
3566 skip_taps:
3567 #ifdef CONFIG_NET_CLS_ACT
3568 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3569 if (!skb)
3570 goto unlock;
3571 ncls:
3572 #endif
3573
3574 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3575 goto drop;
3576
3577 if (vlan_tx_tag_present(skb)) {
3578 if (pt_prev) {
3579 ret = deliver_skb(skb, pt_prev, orig_dev);
3580 pt_prev = NULL;
3581 }
3582 if (vlan_do_receive(&skb))
3583 goto another_round;
3584 else if (unlikely(!skb))
3585 goto unlock;
3586 }
3587
3588 rx_handler = rcu_dereference(skb->dev->rx_handler);
3589 if (rx_handler) {
3590 if (pt_prev) {
3591 ret = deliver_skb(skb, pt_prev, orig_dev);
3592 pt_prev = NULL;
3593 }
3594 switch (rx_handler(&skb)) {
3595 case RX_HANDLER_CONSUMED:
3596 ret = NET_RX_SUCCESS;
3597 goto unlock;
3598 case RX_HANDLER_ANOTHER:
3599 goto another_round;
3600 case RX_HANDLER_EXACT:
3601 deliver_exact = true;
3602 case RX_HANDLER_PASS:
3603 break;
3604 default:
3605 BUG();
3606 }
3607 }
3608
3609 if (unlikely(vlan_tx_tag_present(skb))) {
3610 if (vlan_tx_tag_get_id(skb))
3611 skb->pkt_type = PACKET_OTHERHOST;
3612 /* Note: we might in the future use prio bits
3613 * and set skb->priority like in vlan_do_receive()
3614 * For the time being, just ignore Priority Code Point
3615 */
3616 skb->vlan_tci = 0;
3617 }
3618
3619 /* deliver only exact match when indicated */
3620 null_or_dev = deliver_exact ? skb->dev : NULL;
3621
3622 type = skb->protocol;
3623 list_for_each_entry_rcu(ptype,
3624 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3625 if (ptype->type == type &&
3626 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3627 ptype->dev == orig_dev)) {
3628 if (pt_prev)
3629 ret = deliver_skb(skb, pt_prev, orig_dev);
3630 pt_prev = ptype;
3631 }
3632 }
3633
3634 if (pt_prev) {
3635 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3636 goto drop;
3637 else
3638 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3639 } else {
3640 drop:
3641 atomic_long_inc(&skb->dev->rx_dropped);
3642 kfree_skb(skb);
3643 /* Jamal, now you will not be able to escape explaining
3644 * to me how you were going to use this. :-)
3645 */
3646 ret = NET_RX_DROP;
3647 }
3648
3649 unlock:
3650 rcu_read_unlock();
3651 out:
3652 return ret;
3653 }
3654
3655 static int __netif_receive_skb(struct sk_buff *skb)
3656 {
3657 int ret;
3658
3659 if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3660 unsigned long pflags = current->flags;
3661
3662 /*
3663 * PFMEMALLOC skbs are special, they should
3664 * - be delivered to SOCK_MEMALLOC sockets only
3665 * - stay away from userspace
3666 * - have bounded memory usage
3667 *
3668 * Use PF_MEMALLOC as this saves us from propagating the allocation
3669 * context down to all allocation sites.
3670 */
3671 current->flags |= PF_MEMALLOC;
3672 ret = __netif_receive_skb_core(skb, true);
3673 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3674 } else
3675 ret = __netif_receive_skb_core(skb, false);
3676
3677 return ret;
3678 }
3679
3680 static int netif_receive_skb_internal(struct sk_buff *skb)
3681 {
3682 net_timestamp_check(netdev_tstamp_prequeue, skb);
3683
3684 if (skb_defer_rx_timestamp(skb))
3685 return NET_RX_SUCCESS;
3686
3687 #ifdef CONFIG_RPS
3688 if (static_key_false(&rps_needed)) {
3689 struct rps_dev_flow voidflow, *rflow = &voidflow;
3690 int cpu, ret;
3691
3692 rcu_read_lock();
3693
3694 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3695
3696 if (cpu >= 0) {
3697 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3698 rcu_read_unlock();
3699 return ret;
3700 }
3701 rcu_read_unlock();
3702 }
3703 #endif
3704 return __netif_receive_skb(skb);
3705 }
3706
3707 /**
3708 * netif_receive_skb - process receive buffer from network
3709 * @skb: buffer to process
3710 *
3711 * netif_receive_skb() is the main receive data processing function.
3712 * It always succeeds. The buffer may be dropped during processing
3713 * for congestion control or by the protocol layers.
3714 *
3715 * This function may only be called from softirq context and interrupts
3716 * should be enabled.
3717 *
3718 * Return values (usually ignored):
3719 * NET_RX_SUCCESS: no congestion
3720 * NET_RX_DROP: packet was dropped
3721 */
3722 int netif_receive_skb(struct sk_buff *skb)
3723 {
3724 trace_netif_receive_skb_entry(skb);
3725
3726 return netif_receive_skb_internal(skb);
3727 }
3728 EXPORT_SYMBOL(netif_receive_skb);
3729
3730 /* Network device is going away, flush any packets still pending
3731 * Called with irqs disabled.
3732 */
3733 static void flush_backlog(void *arg)
3734 {
3735 struct net_device *dev = arg;
3736 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3737 struct sk_buff *skb, *tmp;
3738
3739 rps_lock(sd);
3740 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3741 if (skb->dev == dev) {
3742 __skb_unlink(skb, &sd->input_pkt_queue);
3743 kfree_skb(skb);
3744 input_queue_head_incr(sd);
3745 }
3746 }
3747 rps_unlock(sd);
3748
3749 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3750 if (skb->dev == dev) {
3751 __skb_unlink(skb, &sd->process_queue);
3752 kfree_skb(skb);
3753 input_queue_head_incr(sd);
3754 }
3755 }
3756 }
3757
3758 static int napi_gro_complete(struct sk_buff *skb)
3759 {
3760 struct packet_offload *ptype;
3761 __be16 type = skb->protocol;
3762 struct list_head *head = &offload_base;
3763 int err = -ENOENT;
3764
3765 BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3766
3767 if (NAPI_GRO_CB(skb)->count == 1) {
3768 skb_shinfo(skb)->gso_size = 0;
3769 goto out;
3770 }
3771
3772 rcu_read_lock();
3773 list_for_each_entry_rcu(ptype, head, list) {
3774 if (ptype->type != type || !ptype->callbacks.gro_complete)
3775 continue;
3776
3777 err = ptype->callbacks.gro_complete(skb, 0);
3778 break;
3779 }
3780 rcu_read_unlock();
3781
3782 if (err) {
3783 WARN_ON(&ptype->list == head);
3784 kfree_skb(skb);
3785 return NET_RX_SUCCESS;
3786 }
3787
3788 out:
3789 return netif_receive_skb_internal(skb);
3790 }
3791
3792 /* napi->gro_list contains packets ordered by age, with the
3793 * youngest packets at the head of it.
3794 * Complete skbs in reverse order to reduce latencies.
3795 */
3796 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3797 {
3798 struct sk_buff *skb, *prev = NULL;
3799
3800 /* scan list and build reverse chain */
3801 for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3802 skb->prev = prev;
3803 prev = skb;
3804 }
3805
3806 for (skb = prev; skb; skb = prev) {
3807 skb->next = NULL;
3808
3809 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3810 return;
3811
3812 prev = skb->prev;
3813 napi_gro_complete(skb);
3814 napi->gro_count--;
3815 }
3816
3817 napi->gro_list = NULL;
3818 }
3819 EXPORT_SYMBOL(napi_gro_flush);
3820
3821 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3822 {
3823 struct sk_buff *p;
3824 unsigned int maclen = skb->dev->hard_header_len;
3825 u32 hash = skb_get_hash_raw(skb);
3826
3827 for (p = napi->gro_list; p; p = p->next) {
3828 unsigned long diffs;
3829
3830 NAPI_GRO_CB(p)->flush = 0;
3831
3832 if (hash != skb_get_hash_raw(p)) {
3833 NAPI_GRO_CB(p)->same_flow = 0;
3834 continue;
3835 }
3836
3837 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3838 diffs |= p->vlan_tci ^ skb->vlan_tci;
3839 if (maclen == ETH_HLEN)
3840 diffs |= compare_ether_header(skb_mac_header(p),
3841 skb_gro_mac_header(skb));
3842 else if (!diffs)
3843 diffs = memcmp(skb_mac_header(p),
3844 skb_gro_mac_header(skb),
3845 maclen);
3846 NAPI_GRO_CB(p)->same_flow = !diffs;
3847 }
3848 }
3849
3850 static void skb_gro_reset_offset(struct sk_buff *skb)
3851 {
3852 const struct skb_shared_info *pinfo = skb_shinfo(skb);
3853 const skb_frag_t *frag0 = &pinfo->frags[0];
3854
3855 NAPI_GRO_CB(skb)->data_offset = 0;
3856 NAPI_GRO_CB(skb)->frag0 = NULL;
3857 NAPI_GRO_CB(skb)->frag0_len = 0;
3858
3859 if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
3860 pinfo->nr_frags &&
3861 !PageHighMem(skb_frag_page(frag0))) {
3862 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
3863 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
3864 }
3865 }
3866
3867 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3868 {
3869 struct sk_buff **pp = NULL;
3870 struct packet_offload *ptype;
3871 __be16 type = skb->protocol;
3872 struct list_head *head = &offload_base;
3873 int same_flow;
3874 enum gro_result ret;
3875
3876 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3877 goto normal;
3878
3879 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3880 goto normal;
3881
3882 skb_gro_reset_offset(skb);
3883 gro_list_prepare(napi, skb);
3884 NAPI_GRO_CB(skb)->csum = skb->csum; /* Needed for CHECKSUM_COMPLETE */
3885
3886 rcu_read_lock();
3887 list_for_each_entry_rcu(ptype, head, list) {
3888 if (ptype->type != type || !ptype->callbacks.gro_receive)
3889 continue;
3890
3891 skb_set_network_header(skb, skb_gro_offset(skb));
3892 skb_reset_mac_len(skb);
3893 NAPI_GRO_CB(skb)->same_flow = 0;
3894 NAPI_GRO_CB(skb)->flush = 0;
3895 NAPI_GRO_CB(skb)->free = 0;
3896 NAPI_GRO_CB(skb)->udp_mark = 0;
3897
3898 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
3899 break;
3900 }
3901 rcu_read_unlock();
3902
3903 if (&ptype->list == head)
3904 goto normal;
3905
3906 same_flow = NAPI_GRO_CB(skb)->same_flow;
3907 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3908
3909 if (pp) {
3910 struct sk_buff *nskb = *pp;
3911
3912 *pp = nskb->next;
3913 nskb->next = NULL;
3914 napi_gro_complete(nskb);
3915 napi->gro_count--;
3916 }
3917
3918 if (same_flow)
3919 goto ok;
3920
3921 if (NAPI_GRO_CB(skb)->flush)
3922 goto normal;
3923
3924 if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
3925 struct sk_buff *nskb = napi->gro_list;
3926
3927 /* locate the end of the list to select the 'oldest' flow */
3928 while (nskb->next) {
3929 pp = &nskb->next;
3930 nskb = *pp;
3931 }
3932 *pp = NULL;
3933 nskb->next = NULL;
3934 napi_gro_complete(nskb);
3935 } else {
3936 napi->gro_count++;
3937 }
3938 NAPI_GRO_CB(skb)->count = 1;
3939 NAPI_GRO_CB(skb)->age = jiffies;
3940 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3941 skb->next = napi->gro_list;
3942 napi->gro_list = skb;
3943 ret = GRO_HELD;
3944
3945 pull:
3946 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3947 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3948
3949 BUG_ON(skb->end - skb->tail < grow);
3950
3951 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3952
3953 skb->tail += grow;
3954 skb->data_len -= grow;
3955
3956 skb_shinfo(skb)->frags[0].page_offset += grow;
3957 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3958
3959 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3960 skb_frag_unref(skb, 0);
3961 memmove(skb_shinfo(skb)->frags,
3962 skb_shinfo(skb)->frags + 1,
3963 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3964 }
3965 }
3966
3967 ok:
3968 return ret;
3969
3970 normal:
3971 ret = GRO_NORMAL;
3972 goto pull;
3973 }
3974
3975 struct packet_offload *gro_find_receive_by_type(__be16 type)
3976 {
3977 struct list_head *offload_head = &offload_base;
3978 struct packet_offload *ptype;
3979
3980 list_for_each_entry_rcu(ptype, offload_head, list) {
3981 if (ptype->type != type || !ptype->callbacks.gro_receive)
3982 continue;
3983 return ptype;
3984 }
3985 return NULL;
3986 }
3987 EXPORT_SYMBOL(gro_find_receive_by_type);
3988
3989 struct packet_offload *gro_find_complete_by_type(__be16 type)
3990 {
3991 struct list_head *offload_head = &offload_base;
3992 struct packet_offload *ptype;
3993
3994 list_for_each_entry_rcu(ptype, offload_head, list) {
3995 if (ptype->type != type || !ptype->callbacks.gro_complete)
3996 continue;
3997 return ptype;
3998 }
3999 return NULL;
4000 }
4001 EXPORT_SYMBOL(gro_find_complete_by_type);
4002
4003 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4004 {
4005 switch (ret) {
4006 case GRO_NORMAL:
4007 if (netif_receive_skb_internal(skb))
4008 ret = GRO_DROP;
4009 break;
4010
4011 case GRO_DROP:
4012 kfree_skb(skb);
4013 break;
4014
4015 case GRO_MERGED_FREE:
4016 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4017 kmem_cache_free(skbuff_head_cache, skb);
4018 else
4019 __kfree_skb(skb);
4020 break;
4021
4022 case GRO_HELD:
4023 case GRO_MERGED:
4024 break;
4025 }
4026
4027 return ret;
4028 }
4029
4030 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4031 {
4032 trace_napi_gro_receive_entry(skb);
4033
4034 return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4035 }
4036 EXPORT_SYMBOL(napi_gro_receive);
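/*
 * Illustrative sketch (not part of this file): a driver's NAPI poll routine
 * typically hands completed frames to GRO like this.  The foo_* names are
 * hypothetical driver code; eth_type_trans() and napi_gro_receive() are the
 * real helpers.
 */
static int foo_rx_poll(struct napi_struct *napi, int budget)
{
        int work = 0;
        struct sk_buff *skb;

        while (work < budget && (skb = foo_next_completed_skb(napi)) != NULL) {
                /* set skb->protocol before handing the frame to GRO */
                skb->protocol = eth_type_trans(skb, napi->dev);
                napi_gro_receive(napi, skb);
                work++;
        }
        return work;
}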
4037
4038 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4039 {
4040 __skb_pull(skb, skb_headlen(skb));
4041 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4042 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4043 skb->vlan_tci = 0;
4044 skb->dev = napi->dev;
4045 skb->skb_iif = 0;
4046
4047 napi->skb = skb;
4048 }
4049
4050 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4051 {
4052 struct sk_buff *skb = napi->skb;
4053
4054 if (!skb) {
4055 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
4056 napi->skb = skb;
4057 }
4058 return skb;
4059 }
4060 EXPORT_SYMBOL(napi_get_frags);
4061
4062 static gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
4063 gro_result_t ret)
4064 {
4065 switch (ret) {
4066 case GRO_NORMAL:
4067 if (netif_receive_skb_internal(skb))
4068 ret = GRO_DROP;
4069 break;
4070
4071 case GRO_DROP:
4072 case GRO_MERGED_FREE:
4073 napi_reuse_skb(napi, skb);
4074 break;
4075
4076 case GRO_HELD:
4077 case GRO_MERGED:
4078 break;
4079 }
4080
4081 return ret;
4082 }
4083
4084 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4085 {
4086 struct sk_buff *skb = napi->skb;
4087
4088 napi->skb = NULL;
4089
4090 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
4091 napi_reuse_skb(napi, skb);
4092 return NULL;
4093 }
4094 skb->protocol = eth_type_trans(skb, skb->dev);
4095
4096 return skb;
4097 }
4098
4099 gro_result_t napi_gro_frags(struct napi_struct *napi)
4100 {
4101 struct sk_buff *skb = napi_frags_skb(napi);
4102
4103 if (!skb)
4104 return GRO_DROP;
4105
4106 trace_napi_gro_frags_entry(skb);
4107
4108 return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4109 }
4110 EXPORT_SYMBOL(napi_gro_frags);
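/*
 * Illustrative sketch (not part of this file): drivers that receive straight
 * into page fragments use napi_get_frags()/napi_gro_frags() instead of
 * building the skb themselves.  The foo_* names are hypothetical.
 */
static void foo_receive_frag(struct napi_struct *napi, struct page *page,
                             unsigned int offset, unsigned int len)
{
        struct sk_buff *skb = napi_get_frags(napi);

        if (unlikely(!skb)) {
                put_page(page); /* out of memory: drop the fragment */
                return;
        }

        /* attach the page fragment and account for its true size */
        skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
                        PAGE_SIZE);
        napi_gro_frags(napi);
}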
4111
4112 /*
4113 * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
4114 * Note: called with local irq disabled, but exits with local irq enabled.
4115 */
4116 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4117 {
4118 #ifdef CONFIG_RPS
4119 struct softnet_data *remsd = sd->rps_ipi_list;
4120
4121 if (remsd) {
4122 sd->rps_ipi_list = NULL;
4123
4124 local_irq_enable();
4125
4126 /* Send pending IPIs to kick RPS processing on remote cpus. */
4127 while (remsd) {
4128 struct softnet_data *next = remsd->rps_ipi_next;
4129
4130 if (cpu_online(remsd->cpu))
4131 __smp_call_function_single(remsd->cpu,
4132 &remsd->csd, 0);
4133 remsd = next;
4134 }
4135 } else
4136 #endif
4137 local_irq_enable();
4138 }
4139
4140 static int process_backlog(struct napi_struct *napi, int quota)
4141 {
4142 int work = 0;
4143 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4144
4145 #ifdef CONFIG_RPS
4146 /* Check if we have pending IPIs; it's better to send them now
4147 * rather than waiting for net_rx_action() to end.
4148 */
4149 if (sd->rps_ipi_list) {
4150 local_irq_disable();
4151 net_rps_action_and_irq_enable(sd);
4152 }
4153 #endif
4154 napi->weight = weight_p;
4155 local_irq_disable();
4156 while (work < quota) {
4157 struct sk_buff *skb;
4158 unsigned int qlen;
4159
4160 while ((skb = __skb_dequeue(&sd->process_queue))) {
4161 local_irq_enable();
4162 __netif_receive_skb(skb);
4163 local_irq_disable();
4164 input_queue_head_incr(sd);
4165 if (++work >= quota) {
4166 local_irq_enable();
4167 return work;
4168 }
4169 }
4170
4171 rps_lock(sd);
4172 qlen = skb_queue_len(&sd->input_pkt_queue);
4173 if (qlen)
4174 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4175 &sd->process_queue);
4176
4177 if (qlen < quota - work) {
4178 /*
4179 * Inline a custom version of __napi_complete().
4180 * Only the current CPU owns and manipulates this napi,
4181 * and NAPI_STATE_SCHED is the only possible flag set on backlog,
4182 * so we can use a plain write instead of clear_bit()
4183 * and we don't need an smp_mb() memory barrier.
4184 */
4185 list_del(&napi->poll_list);
4186 napi->state = 0;
4187
4188 quota = work + qlen;
4189 }
4190 rps_unlock(sd);
4191 }
4192 local_irq_enable();
4193
4194 return work;
4195 }
4196
4197 /**
4198 * __napi_schedule - schedule for receive
4199 * @n: entry to schedule
4200 *
4201 * The entry's receive function will be scheduled to run
4202 */
4203 void __napi_schedule(struct napi_struct *n)
4204 {
4205 unsigned long flags;
4206
4207 local_irq_save(flags);
4208 ____napi_schedule(&__get_cpu_var(softnet_data), n);
4209 local_irq_restore(flags);
4210 }
4211 EXPORT_SYMBOL(__napi_schedule);
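/*
 * Illustrative sketch (not part of this file): an interrupt handler usually
 * masks the device's RX interrupt and schedules NAPI.  The foo_* names are
 * hypothetical; napi_schedule_prep() and __napi_schedule() are the real API.
 */
static irqreturn_t foo_interrupt(int irq, void *data)
{
        struct foo_priv *priv = data;

        if (napi_schedule_prep(&priv->napi)) {
                foo_disable_rx_irq(priv);       /* hypothetical hw masking */
                __napi_schedule(&priv->napi);
        }
        return IRQ_HANDLED;
}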
4212
4213 void __napi_complete(struct napi_struct *n)
4214 {
4215 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4216 BUG_ON(n->gro_list);
4217
4218 list_del(&n->poll_list);
4219 smp_mb__before_clear_bit();
4220 clear_bit(NAPI_STATE_SCHED, &n->state);
4221 }
4222 EXPORT_SYMBOL(__napi_complete);
4223
4224 void napi_complete(struct napi_struct *n)
4225 {
4226 unsigned long flags;
4227
4228 /*
4229 * don't let napi dequeue from the cpu poll list
4230 * just in case it's running on a different cpu
4231 */
4232 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4233 return;
4234
4235 napi_gro_flush(n, false);
4236 local_irq_save(flags);
4237 __napi_complete(n);
4238 local_irq_restore(flags);
4239 }
4240 EXPORT_SYMBOL(napi_complete);
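/*
 * Illustrative sketch (not part of this file): when a poll round consumes
 * less than its budget the driver calls napi_complete() and only then
 * re-enables its interrupt.  The foo_* names are hypothetical.
 */
static int foo_poll(struct napi_struct *napi, int budget)
{
        struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
        int work = foo_clean_rx(priv, budget);  /* hypothetical RX cleanup */

        if (work < budget) {
                napi_complete(napi);
                foo_enable_rx_irq(priv);        /* hypothetical hw unmasking */
        }
        return work;
}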
4241
4242 /* must be called under rcu_read_lock(), as we dont take a reference */
4243 struct napi_struct *napi_by_id(unsigned int napi_id)
4244 {
4245 unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4246 struct napi_struct *napi;
4247
4248 hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4249 if (napi->napi_id == napi_id)
4250 return napi;
4251
4252 return NULL;
4253 }
4254 EXPORT_SYMBOL_GPL(napi_by_id);
4255
4256 void napi_hash_add(struct napi_struct *napi)
4257 {
4258 if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4259
4260 spin_lock(&napi_hash_lock);
4261
4262 /* 0 is not a valid id; we also skip an id that is already taken.
4263 * We expect both events to be extremely rare.
4264 */
4265 napi->napi_id = 0;
4266 while (!napi->napi_id) {
4267 napi->napi_id = ++napi_gen_id;
4268 if (napi_by_id(napi->napi_id))
4269 napi->napi_id = 0;
4270 }
4271
4272 hlist_add_head_rcu(&napi->napi_hash_node,
4273 &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4274
4275 spin_unlock(&napi_hash_lock);
4276 }
4277 }
4278 EXPORT_SYMBOL_GPL(napi_hash_add);
4279
4280 /* Warning: the caller is responsible for making sure an RCU grace period
4281 * has elapsed before freeing the memory containing @napi
4282 */
4283 void napi_hash_del(struct napi_struct *napi)
4284 {
4285 spin_lock(&napi_hash_lock);
4286
4287 if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4288 hlist_del_rcu(&napi->napi_hash_node);
4289
4290 spin_unlock(&napi_hash_lock);
4291 }
4292 EXPORT_SYMBOL_GPL(napi_hash_del);
4293
4294 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4295 int (*poll)(struct napi_struct *, int), int weight)
4296 {
4297 INIT_LIST_HEAD(&napi->poll_list);
4298 napi->gro_count = 0;
4299 napi->gro_list = NULL;
4300 napi->skb = NULL;
4301 napi->poll = poll;
4302 if (weight > NAPI_POLL_WEIGHT)
4303 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4304 weight, dev->name);
4305 napi->weight = weight;
4306 list_add(&napi->dev_list, &dev->napi_list);
4307 napi->dev = dev;
4308 #ifdef CONFIG_NETPOLL
4309 spin_lock_init(&napi->poll_lock);
4310 napi->poll_owner = -1;
4311 #endif
4312 set_bit(NAPI_STATE_SCHED, &napi->state);
4313 }
4314 EXPORT_SYMBOL(netif_napi_add);
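/*
 * Illustrative sketch (not part of this file): NAPI contexts are registered
 * once at probe time, before register_netdev(), and enabled later from
 * ndo_open.  foo_priv and foo_poll are hypothetical; NAPI_POLL_WEIGHT is the
 * real default weight.
 */
static int foo_probe_napi(struct net_device *dev)
{
        struct foo_priv *priv = netdev_priv(dev);

        netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
        return 0;
}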
4315
4316 void netif_napi_del(struct napi_struct *napi)
4317 {
4318 list_del_init(&napi->dev_list);
4319 napi_free_frags(napi);
4320
4321 kfree_skb_list(napi->gro_list);
4322 napi->gro_list = NULL;
4323 napi->gro_count = 0;
4324 }
4325 EXPORT_SYMBOL(netif_napi_del);
4326
4327 static void net_rx_action(struct softirq_action *h)
4328 {
4329 struct softnet_data *sd = &__get_cpu_var(softnet_data);
4330 unsigned long time_limit = jiffies + 2;
4331 int budget = netdev_budget;
4332 void *have;
4333
4334 local_irq_disable();
4335
4336 while (!list_empty(&sd->poll_list)) {
4337 struct napi_struct *n;
4338 int work, weight;
4339
4340 /* If the softirq window is exhausted then punt.
4341 * Allow this to run for 2 jiffies, which allows
4342 * an average latency of 1.5/HZ.
4343 */
4344 if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
4345 goto softnet_break;
4346
4347 local_irq_enable();
4348
4349 /* Even though interrupts have been re-enabled, this
4350 * access is safe because interrupts can only add new
4351 * entries to the tail of this list, and only ->poll()
4352 * calls can remove this head entry from the list.
4353 */
4354 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
4355
4356 have = netpoll_poll_lock(n);
4357
4358 weight = n->weight;
4359
4360 /* This NAPI_STATE_SCHED test is for avoiding a race
4361 * with netpoll's poll_napi(). Only the entity which
4362 * obtains the lock and sees NAPI_STATE_SCHED set will
4363 * actually make the ->poll() call. Therefore we avoid
4364 * accidentally calling ->poll() when NAPI is not scheduled.
4365 */
4366 work = 0;
4367 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4368 work = n->poll(n, weight);
4369 trace_napi_poll(n);
4370 }
4371
4372 WARN_ON_ONCE(work > weight);
4373
4374 budget -= work;
4375
4376 local_irq_disable();
4377
4378 /* Drivers must not modify the NAPI state if they
4379 * consume the entire weight. In such cases this code
4380 * still "owns" the NAPI instance and therefore can
4381 * move the instance around on the list at-will.
4382 */
4383 if (unlikely(work == weight)) {
4384 if (unlikely(napi_disable_pending(n))) {
4385 local_irq_enable();
4386 napi_complete(n);
4387 local_irq_disable();
4388 } else {
4389 if (n->gro_list) {
4390 /* flush too old packets
4391 * If HZ < 1000, flush all packets.
4392 */
4393 local_irq_enable();
4394 napi_gro_flush(n, HZ >= 1000);
4395 local_irq_disable();
4396 }
4397 list_move_tail(&n->poll_list, &sd->poll_list);
4398 }
4399 }
4400
4401 netpoll_poll_unlock(have);
4402 }
4403 out:
4404 net_rps_action_and_irq_enable(sd);
4405
4406 #ifdef CONFIG_NET_DMA
4407 /*
4408 * There may not be any more sk_buffs coming right now, so push
4409 * any pending DMA copies to hardware
4410 */
4411 dma_issue_pending_all();
4412 #endif
4413
4414 return;
4415
4416 softnet_break:
4417 sd->time_squeeze++;
4418 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4419 goto out;
4420 }
4421
4422 struct netdev_adjacent {
4423 struct net_device *dev;
4424
4425 /* upper master flag, there can only be one master device per list */
4426 bool master;
4427
4428 /* counter for the number of times this device was added to us */
4429 u16 ref_nr;
4430
4431 /* private field for the users */
4432 void *private;
4433
4434 struct list_head list;
4435 struct rcu_head rcu;
4436 };
4437
4438 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4439 struct net_device *adj_dev,
4440 struct list_head *adj_list)
4441 {
4442 struct netdev_adjacent *adj;
4443
4444 list_for_each_entry(adj, adj_list, list) {
4445 if (adj->dev == adj_dev)
4446 return adj;
4447 }
4448 return NULL;
4449 }
4450
4451 /**
4452 * netdev_has_upper_dev - Check if device is linked to an upper device
4453 * @dev: device
4454 * @upper_dev: upper device to check
4455 *
4456 * Find out if a device is linked to specified upper device and return true
4457 * in case it is. Note that this checks only immediate upper device,
4458 * not through a complete stack of devices. The caller must hold the RTNL lock.
4459 */
4460 bool netdev_has_upper_dev(struct net_device *dev,
4461 struct net_device *upper_dev)
4462 {
4463 ASSERT_RTNL();
4464
4465 return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
4466 }
4467 EXPORT_SYMBOL(netdev_has_upper_dev);
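/*
 * Illustrative sketch (not part of this file): checking a stacking
 * relationship must be done with RTNL held.  The foo_* name is hypothetical.
 */
static bool foo_is_stacked_under(struct net_device *lower,
                                 struct net_device *upper)
{
        bool linked;

        rtnl_lock();
        linked = netdev_has_upper_dev(lower, upper);
        rtnl_unlock();
        return linked;
}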
4468
4469 /**
4470 * netdev_has_any_upper_dev - Check if device is linked to some device
4471 * @dev: device
4472 *
4473 * Find out if a device is linked to an upper device and return true in case
4474 * it is. The caller must hold the RTNL lock.
4475 */
4476 static bool netdev_has_any_upper_dev(struct net_device *dev)
4477 {
4478 ASSERT_RTNL();
4479
4480 return !list_empty(&dev->all_adj_list.upper);
4481 }
4482
4483 /**
4484 * netdev_master_upper_dev_get - Get master upper device
4485 * @dev: device
4486 *
4487 * Find a master upper device and return pointer to it or NULL in case
4488 * it's not there. The caller must hold the RTNL lock.
4489 */
4490 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4491 {
4492 struct netdev_adjacent *upper;
4493
4494 ASSERT_RTNL();
4495
4496 if (list_empty(&dev->adj_list.upper))
4497 return NULL;
4498
4499 upper = list_first_entry(&dev->adj_list.upper,
4500 struct netdev_adjacent, list);
4501 if (likely(upper->master))
4502 return upper->dev;
4503 return NULL;
4504 }
4505 EXPORT_SYMBOL(netdev_master_upper_dev_get);
4506
4507 void *netdev_adjacent_get_private(struct list_head *adj_list)
4508 {
4509 struct netdev_adjacent *adj;
4510
4511 adj = list_entry(adj_list, struct netdev_adjacent, list);
4512
4513 return adj->private;
4514 }
4515 EXPORT_SYMBOL(netdev_adjacent_get_private);
4516
4517 /**
4518 * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4519 * @dev: device
4520 * @iter: list_head ** of the current position
4521 *
4522 * Gets the next device from the dev's upper list, starting from iter
4523 * position. The caller must hold RCU read lock.
4524 */
4525 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4526 struct list_head **iter)
4527 {
4528 struct netdev_adjacent *upper;
4529
4530 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4531
4532 upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4533
4534 if (&upper->list == &dev->all_adj_list.upper)
4535 return NULL;
4536
4537 *iter = &upper->list;
4538
4539 return upper->dev;
4540 }
4541 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
4542
4543 /**
4544 * netdev_lower_get_next_private - Get the next ->private from the
4545 * lower neighbour list
4546 * @dev: device
4547 * @iter: list_head ** of the current position
4548 *
4549 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4550 * list, starting from iter position. The caller must either hold the
4551 * RTNL lock or its own locking that guarantees that the neighbour lower
4552 * list will remain unchanged.
4553 */
4554 void *netdev_lower_get_next_private(struct net_device *dev,
4555 struct list_head **iter)
4556 {
4557 struct netdev_adjacent *lower;
4558
4559 lower = list_entry(*iter, struct netdev_adjacent, list);
4560
4561 if (&lower->list == &dev->adj_list.lower)
4562 return NULL;
4563
4564 if (iter)
4565 *iter = lower->list.next;
4566
4567 return lower->private;
4568 }
4569 EXPORT_SYMBOL(netdev_lower_get_next_private);
4570
4571 /**
4572 * netdev_lower_get_next_private_rcu - Get the next ->private from the
4573 * lower neighbour list, RCU
4574 * variant
4575 * @dev: device
4576 * @iter: list_head ** of the current position
4577 *
4578 * Gets the next netdev_adjacent->private from the dev's lower neighbour
4579 * list, starting from iter position. The caller must hold RCU read lock.
4580 */
4581 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4582 struct list_head **iter)
4583 {
4584 struct netdev_adjacent *lower;
4585
4586 WARN_ON_ONCE(!rcu_read_lock_held());
4587
4588 lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4589
4590 if (&lower->list == &dev->adj_list.lower)
4591 return NULL;
4592
4593 if (iter)
4594 *iter = &lower->list;
4595
4596 return lower->private;
4597 }
4598 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4599
4600 /**
4601 * netdev_lower_get_first_private_rcu - Get the first ->private from the
4602 * lower neighbour list, RCU
4603 * variant
4604 * @dev: device
4605 *
4606 * Gets the first netdev_adjacent->private from the dev's lower neighbour
4607 * list. The caller must hold RCU read lock.
4608 */
4609 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4610 {
4611 struct netdev_adjacent *lower;
4612
4613 lower = list_first_or_null_rcu(&dev->adj_list.lower,
4614 struct netdev_adjacent, list);
4615 if (lower)
4616 return lower->private;
4617 return NULL;
4618 }
4619 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4620
4621 /**
4622 * netdev_master_upper_dev_get_rcu - Get master upper device
4623 * @dev: device
4624 *
4625 * Find a master upper device and return pointer to it or NULL in case
4626 * it's not there. The caller must hold the RCU read lock.
4627 */
4628 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4629 {
4630 struct netdev_adjacent *upper;
4631
4632 upper = list_first_or_null_rcu(&dev->adj_list.upper,
4633 struct netdev_adjacent, list);
4634 if (upper && likely(upper->master))
4635 return upper->dev;
4636 return NULL;
4637 }
4638 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4639
4640 int netdev_adjacent_sysfs_add(struct net_device *dev,
4641 struct net_device *adj_dev,
4642 struct list_head *dev_list)
4643 {
4644 char linkname[IFNAMSIZ+7];
4645 sprintf(linkname, dev_list == &dev->adj_list.upper ?
4646 "upper_%s" : "lower_%s", adj_dev->name);
4647 return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4648 linkname);
4649 }
4650 void netdev_adjacent_sysfs_del(struct net_device *dev,
4651 char *name,
4652 struct list_head *dev_list)
4653 {
4654 char linkname[IFNAMSIZ+7];
4655 sprintf(linkname, dev_list == &dev->adj_list.upper ?
4656 "upper_%s" : "lower_%s", name);
4657 sysfs_remove_link(&(dev->dev.kobj), linkname);
4658 }
4659
4660 #define netdev_adjacent_is_neigh_list(dev, dev_list) \
4661 (dev_list == &dev->adj_list.upper || \
4662 dev_list == &dev->adj_list.lower)
4663
4664 static int __netdev_adjacent_dev_insert(struct net_device *dev,
4665 struct net_device *adj_dev,
4666 struct list_head *dev_list,
4667 void *private, bool master)
4668 {
4669 struct netdev_adjacent *adj;
4670 int ret;
4671
4672 adj = __netdev_find_adj(dev, adj_dev, dev_list);
4673
4674 if (adj) {
4675 adj->ref_nr++;
4676 return 0;
4677 }
4678
4679 adj = kmalloc(sizeof(*adj), GFP_KERNEL);
4680 if (!adj)
4681 return -ENOMEM;
4682
4683 adj->dev = adj_dev;
4684 adj->master = master;
4685 adj->ref_nr = 1;
4686 adj->private = private;
4687 dev_hold(adj_dev);
4688
4689 pr_debug("dev_hold for %s, because of link added from %s to %s\n",
4690 adj_dev->name, dev->name, adj_dev->name);
4691
4692 if (netdev_adjacent_is_neigh_list(dev, dev_list)) {
4693 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
4694 if (ret)
4695 goto free_adj;
4696 }
4697
4698 /* Ensure that master link is always the first item in list. */
4699 if (master) {
4700 ret = sysfs_create_link(&(dev->dev.kobj),
4701 &(adj_dev->dev.kobj), "master");
4702 if (ret)
4703 goto remove_symlinks;
4704
4705 list_add_rcu(&adj->list, dev_list);
4706 } else {
4707 list_add_tail_rcu(&adj->list, dev_list);
4708 }
4709
4710 return 0;
4711
4712 remove_symlinks:
4713 if (netdev_adjacent_is_neigh_list(dev, dev_list))
4714 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
4715 free_adj:
4716 kfree(adj);
4717 dev_put(adj_dev);
4718
4719 return ret;
4720 }
4721
4722 static void __netdev_adjacent_dev_remove(struct net_device *dev,
4723 struct net_device *adj_dev,
4724 struct list_head *dev_list)
4725 {
4726 struct netdev_adjacent *adj;
4727
4728 adj = __netdev_find_adj(dev, adj_dev, dev_list);
4729
4730 if (!adj) {
4731 pr_err("tried to remove device %s from %s\n",
4732 dev->name, adj_dev->name);
4733 BUG();
4734 }
4735
4736 if (adj->ref_nr > 1) {
4737 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
4738 adj->ref_nr-1);
4739 adj->ref_nr--;
4740 return;
4741 }
4742
4743 if (adj->master)
4744 sysfs_remove_link(&(dev->dev.kobj), "master");
4745
4746 if (netdev_adjacent_is_neigh_list(dev, dev_list))
4747 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
4748
4749 list_del_rcu(&adj->list);
4750 pr_debug("dev_put for %s, because link removed from %s to %s\n",
4751 adj_dev->name, dev->name, adj_dev->name);
4752 dev_put(adj_dev);
4753 kfree_rcu(adj, rcu);
4754 }
4755
4756 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
4757 struct net_device *upper_dev,
4758 struct list_head *up_list,
4759 struct list_head *down_list,
4760 void *private, bool master)
4761 {
4762 int ret;
4763
4764 ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
4765 master);
4766 if (ret)
4767 return ret;
4768
4769 ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
4770 false);
4771 if (ret) {
4772 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4773 return ret;
4774 }
4775
4776 return 0;
4777 }
4778
4779 static int __netdev_adjacent_dev_link(struct net_device *dev,
4780 struct net_device *upper_dev)
4781 {
4782 return __netdev_adjacent_dev_link_lists(dev, upper_dev,
4783 &dev->all_adj_list.upper,
4784 &upper_dev->all_adj_list.lower,
4785 NULL, false);
4786 }
4787
4788 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
4789 struct net_device *upper_dev,
4790 struct list_head *up_list,
4791 struct list_head *down_list)
4792 {
4793 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
4794 __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
4795 }
4796
4797 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
4798 struct net_device *upper_dev)
4799 {
4800 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4801 &dev->all_adj_list.upper,
4802 &upper_dev->all_adj_list.lower);
4803 }
4804
4805 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
4806 struct net_device *upper_dev,
4807 void *private, bool master)
4808 {
4809 int ret = __netdev_adjacent_dev_link(dev, upper_dev);
4810
4811 if (ret)
4812 return ret;
4813
4814 ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
4815 &dev->adj_list.upper,
4816 &upper_dev->adj_list.lower,
4817 private, master);
4818 if (ret) {
4819 __netdev_adjacent_dev_unlink(dev, upper_dev);
4820 return ret;
4821 }
4822
4823 return 0;
4824 }
4825
4826 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
4827 struct net_device *upper_dev)
4828 {
4829 __netdev_adjacent_dev_unlink(dev, upper_dev);
4830 __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
4831 &dev->adj_list.upper,
4832 &upper_dev->adj_list.lower);
4833 }
4834
4835 static int __netdev_upper_dev_link(struct net_device *dev,
4836 struct net_device *upper_dev, bool master,
4837 void *private)
4838 {
4839 struct netdev_adjacent *i, *j, *to_i, *to_j;
4840 int ret = 0;
4841
4842 ASSERT_RTNL();
4843
4844 if (dev == upper_dev)
4845 return -EBUSY;
4846
4847 /* To prevent loops, check if dev is not upper device to upper_dev. */
4848 if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
4849 return -EBUSY;
4850
4851 if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
4852 return -EEXIST;
4853
4854 if (master && netdev_master_upper_dev_get(dev))
4855 return -EBUSY;
4856
4857 ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
4858 master);
4859 if (ret)
4860 return ret;
4861
4862 /* Now that we linked these devs, make all the upper_dev's
4863 * all_adj_list.upper visible to every dev's all_adj_list.lower and
4864 * vice versa, and don't forget the devices themselves. All of these
4865 * links are non-neighbours.
4866 */
4867 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4868 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
4869 pr_debug("Interlinking %s with %s, non-neighbour\n",
4870 i->dev->name, j->dev->name);
4871 ret = __netdev_adjacent_dev_link(i->dev, j->dev);
4872 if (ret)
4873 goto rollback_mesh;
4874 }
4875 }
4876
4877 /* add dev to every upper_dev's upper device */
4878 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
4879 pr_debug("linking %s's upper device %s with %s\n",
4880 upper_dev->name, i->dev->name, dev->name);
4881 ret = __netdev_adjacent_dev_link(dev, i->dev);
4882 if (ret)
4883 goto rollback_upper_mesh;
4884 }
4885
4886 /* add upper_dev to every dev's lower device */
4887 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4888 pr_debug("linking %s's lower device %s with %s\n", dev->name,
4889 i->dev->name, upper_dev->name);
4890 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
4891 if (ret)
4892 goto rollback_lower_mesh;
4893 }
4894
4895 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
4896 return 0;
4897
4898 rollback_lower_mesh:
4899 to_i = i;
4900 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4901 if (i == to_i)
4902 break;
4903 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
4904 }
4905
4906 i = NULL;
4907
4908 rollback_upper_mesh:
4909 to_i = i;
4910 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
4911 if (i == to_i)
4912 break;
4913 __netdev_adjacent_dev_unlink(dev, i->dev);
4914 }
4915
4916 i = j = NULL;
4917
4918 rollback_mesh:
4919 to_i = i;
4920 to_j = j;
4921 list_for_each_entry(i, &dev->all_adj_list.lower, list) {
4922 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
4923 if (i == to_i && j == to_j)
4924 break;
4925 __netdev_adjacent_dev_unlink(i->dev, j->dev);
4926 }
4927 if (i == to_i)
4928 break;
4929 }
4930
4931 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
4932
4933 return ret;
4934 }
4935
4936 /**
4937 * netdev_upper_dev_link - Add a link to the upper device
4938 * @dev: device
4939 * @upper_dev: new upper device
4940 *
4941 * Adds a link to device which is upper to this one. The caller must hold
4942 * the RTNL lock. On a failure a negative errno code is returned.
4943 * On success the reference counts are adjusted and the function
4944 * returns zero.
4945 */
4946 int netdev_upper_dev_link(struct net_device *dev,
4947 struct net_device *upper_dev)
4948 {
4949 return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
4950 }
4951 EXPORT_SYMBOL(netdev_upper_dev_link);
4952
4953 /**
4954 * netdev_master_upper_dev_link - Add a master link to the upper device
4955 * @dev: device
4956 * @upper_dev: new upper device
4957 *
4958 * Adds a link to device which is upper to this one. In this case, only
4959 * one master upper device can be linked, although other non-master devices
4960 * might be linked as well. The caller must hold the RTNL lock.
4961 * On a failure a negative errno code is returned. On success the reference
4962 * counts are adjusted and the function returns zero.
4963 */
4964 int netdev_master_upper_dev_link(struct net_device *dev,
4965 struct net_device *upper_dev)
4966 {
4967 return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
4968 }
4969 EXPORT_SYMBOL(netdev_master_upper_dev_link);
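/*
 * Illustrative sketch (not part of this file): a master device such as a
 * bonding or bridge driver links a new slave under RTNL.  The foo_* name is
 * hypothetical; the first argument is the lower (slave) device and the
 * second the master.
 */
static int foo_enslave(struct net_device *master, struct net_device *slave)
{
        int err;

        ASSERT_RTNL();

        err = netdev_master_upper_dev_link(slave, master);
        if (err)
                return err;

        /* further per-slave setup would go here in a real driver */
        return 0;
}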
4970
4971 int netdev_master_upper_dev_link_private(struct net_device *dev,
4972 struct net_device *upper_dev,
4973 void *private)
4974 {
4975 return __netdev_upper_dev_link(dev, upper_dev, true, private);
4976 }
4977 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
4978
4979 /**
4980 * netdev_upper_dev_unlink - Removes a link to upper device
4981 * @dev: device
4982 * @upper_dev: new upper device
4983 *
4984 * Removes a link to device which is upper to this one. The caller must hold
4985 * the RTNL lock.
4986 */
4987 void netdev_upper_dev_unlink(struct net_device *dev,
4988 struct net_device *upper_dev)
4989 {
4990 struct netdev_adjacent *i, *j;
4991 ASSERT_RTNL();
4992
4993 __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
4994
4995 /* Here is the tricky part. We must remove all dev's lower
4996 * devices from all upper_dev's upper devices and vice
4997 * versa, to maintain the graph relationship.
4998 */
4999 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5000 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5001 __netdev_adjacent_dev_unlink(i->dev, j->dev);
5002
5003 /* also remove the devices themselves from the lower/upper device
5004 * lists
5005 */
5006 list_for_each_entry(i, &dev->all_adj_list.lower, list)
5007 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5008
5009 list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5010 __netdev_adjacent_dev_unlink(dev, i->dev);
5011
5012 call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5013 }
5014 EXPORT_SYMBOL(netdev_upper_dev_unlink);
5015
5016 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5017 {
5018 struct netdev_adjacent *iter;
5019
5020 list_for_each_entry(iter, &dev->adj_list.upper, list) {
5021 netdev_adjacent_sysfs_del(iter->dev, oldname,
5022 &iter->dev->adj_list.lower);
5023 netdev_adjacent_sysfs_add(iter->dev, dev,
5024 &iter->dev->adj_list.lower);
5025 }
5026
5027 list_for_each_entry(iter, &dev->adj_list.lower, list) {
5028 netdev_adjacent_sysfs_del(iter->dev, oldname,
5029 &iter->dev->adj_list.upper);
5030 netdev_adjacent_sysfs_add(iter->dev, dev,
5031 &iter->dev->adj_list.upper);
5032 }
5033 }
5034
5035 void *netdev_lower_dev_get_private(struct net_device *dev,
5036 struct net_device *lower_dev)
5037 {
5038 struct netdev_adjacent *lower;
5039
5040 if (!lower_dev)
5041 return NULL;
5042 lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5043 if (!lower)
5044 return NULL;
5045
5046 return lower->private;
5047 }
5048 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5049
5050 static void dev_change_rx_flags(struct net_device *dev, int flags)
5051 {
5052 const struct net_device_ops *ops = dev->netdev_ops;
5053
5054 if (ops->ndo_change_rx_flags)
5055 ops->ndo_change_rx_flags(dev, flags);
5056 }
5057
5058 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5059 {
5060 unsigned int old_flags = dev->flags;
5061 kuid_t uid;
5062 kgid_t gid;
5063
5064 ASSERT_RTNL();
5065
5066 dev->flags |= IFF_PROMISC;
5067 dev->promiscuity += inc;
5068 if (dev->promiscuity == 0) {
5069 /*
5070 * Avoid overflow.
5071 * If inc causes overflow, untouch promisc and return error.
5072 */
5073 if (inc < 0)
5074 dev->flags &= ~IFF_PROMISC;
5075 else {
5076 dev->promiscuity -= inc;
5077 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5078 dev->name);
5079 return -EOVERFLOW;
5080 }
5081 }
5082 if (dev->flags != old_flags) {
5083 pr_info("device %s %s promiscuous mode\n",
5084 dev->name,
5085 dev->flags & IFF_PROMISC ? "entered" : "left");
5086 if (audit_enabled) {
5087 current_uid_gid(&uid, &gid);
5088 audit_log(current->audit_context, GFP_ATOMIC,
5089 AUDIT_ANOM_PROMISCUOUS,
5090 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5091 dev->name, (dev->flags & IFF_PROMISC),
5092 (old_flags & IFF_PROMISC),
5093 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5094 from_kuid(&init_user_ns, uid),
5095 from_kgid(&init_user_ns, gid),
5096 audit_get_sessionid(current));
5097 }
5098
5099 dev_change_rx_flags(dev, IFF_PROMISC);
5100 }
5101 if (notify)
5102 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5103 return 0;
5104 }
5105
5106 /**
5107 * dev_set_promiscuity - update promiscuity count on a device
5108 * @dev: device
5109 * @inc: modifier
5110 *
5111 * Add or remove promiscuity from a device. While the count in the device
5112 * remains above zero the interface remains promiscuous. Once it hits zero
5113 * the device reverts back to normal filtering operation. A negative inc
5114 * value is used to drop promiscuity on the device.
5115 * Return 0 if successful or a negative errno code on error.
5116 */
5117 int dev_set_promiscuity(struct net_device *dev, int inc)
5118 {
5119 unsigned int old_flags = dev->flags;
5120 int err;
5121
5122 err = __dev_set_promiscuity(dev, inc, true);
5123 if (err < 0)
5124 return err;
5125 if (dev->flags != old_flags)
5126 dev_set_rx_mode(dev);
5127 return err;
5128 }
5129 EXPORT_SYMBOL(dev_set_promiscuity);
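/*
 * Illustrative sketch (not part of this file): a packet tap takes a
 * promiscuity reference while active and drops it when done; both calls
 * need RTNL.  The foo_* names are hypothetical.
 */
static int foo_tap_start(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_set_promiscuity(dev, 1);
        rtnl_unlock();
        return err;
}

static void foo_tap_stop(struct net_device *dev)
{
        rtnl_lock();
        dev_set_promiscuity(dev, -1);
        rtnl_unlock();
}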
5130
5131 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5132 {
5133 unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5134
5135 ASSERT_RTNL();
5136
5137 dev->flags |= IFF_ALLMULTI;
5138 dev->allmulti += inc;
5139 if (dev->allmulti == 0) {
5140 /*
5141 * Avoid overflow.
5142 * If inc causes overflow, untouch allmulti and return error.
5143 */
5144 if (inc < 0)
5145 dev->flags &= ~IFF_ALLMULTI;
5146 else {
5147 dev->allmulti -= inc;
5148 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5149 dev->name);
5150 return -EOVERFLOW;
5151 }
5152 }
5153 if (dev->flags ^ old_flags) {
5154 dev_change_rx_flags(dev, IFF_ALLMULTI);
5155 dev_set_rx_mode(dev);
5156 if (notify)
5157 __dev_notify_flags(dev, old_flags,
5158 dev->gflags ^ old_gflags);
5159 }
5160 return 0;
5161 }
5162
5163 /**
5164 * dev_set_allmulti - update allmulti count on a device
5165 * @dev: device
5166 * @inc: modifier
5167 *
5168 * Add or remove reception of all multicast frames to a device. While the
5169 * count in the device remains above zero the interface keeps listening
5170 * to all multicast frames. Once it hits zero the device reverts back to normal
5171 * filtering operation. A negative @inc value is used to drop the counter
5172 * when releasing a resource needing all multicasts.
5173 * Return 0 if successful or a negative errno code on error.
5174 */
5175
5176 int dev_set_allmulti(struct net_device *dev, int inc)
5177 {
5178 return __dev_set_allmulti(dev, inc, true);
5179 }
5180 EXPORT_SYMBOL(dev_set_allmulti);
5181
5182 /*
5183 * Upload unicast and multicast address lists to device and
5184 * configure RX filtering. When the device doesn't support unicast
5185 * filtering it is put in promiscuous mode while unicast addresses
5186 * are present.
5187 */
5188 void __dev_set_rx_mode(struct net_device *dev)
5189 {
5190 const struct net_device_ops *ops = dev->netdev_ops;
5191
5192 /* dev_open will call this function so the list will stay sane. */
5193 if (!(dev->flags&IFF_UP))
5194 return;
5195
5196 if (!netif_device_present(dev))
5197 return;
5198
5199 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
5200 /* Unicast address changes may only happen under the rtnl,
5201 * therefore calling __dev_set_promiscuity here is safe.
5202 */
5203 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5204 __dev_set_promiscuity(dev, 1, false);
5205 dev->uc_promisc = true;
5206 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5207 __dev_set_promiscuity(dev, -1, false);
5208 dev->uc_promisc = false;
5209 }
5210 }
5211
5212 if (ops->ndo_set_rx_mode)
5213 ops->ndo_set_rx_mode(dev);
5214 }
5215
5216 void dev_set_rx_mode(struct net_device *dev)
5217 {
5218 netif_addr_lock_bh(dev);
5219 __dev_set_rx_mode(dev);
5220 netif_addr_unlock_bh(dev);
5221 }
5222
5223 /**
5224 * dev_get_flags - get flags reported to userspace
5225 * @dev: device
5226 *
5227 * Get the combination of flag bits exported through APIs to userspace.
5228 */
5229 unsigned int dev_get_flags(const struct net_device *dev)
5230 {
5231 unsigned int flags;
5232
5233 flags = (dev->flags & ~(IFF_PROMISC |
5234 IFF_ALLMULTI |
5235 IFF_RUNNING |
5236 IFF_LOWER_UP |
5237 IFF_DORMANT)) |
5238 (dev->gflags & (IFF_PROMISC |
5239 IFF_ALLMULTI));
5240
5241 if (netif_running(dev)) {
5242 if (netif_oper_up(dev))
5243 flags |= IFF_RUNNING;
5244 if (netif_carrier_ok(dev))
5245 flags |= IFF_LOWER_UP;
5246 if (netif_dormant(dev))
5247 flags |= IFF_DORMANT;
5248 }
5249
5250 return flags;
5251 }
5252 EXPORT_SYMBOL(dev_get_flags);
5253
5254 int __dev_change_flags(struct net_device *dev, unsigned int flags)
5255 {
5256 unsigned int old_flags = dev->flags;
5257 int ret;
5258
5259 ASSERT_RTNL();
5260
5261 /*
5262 * Set the flags on our device.
5263 */
5264
5265 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5266 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5267 IFF_AUTOMEDIA)) |
5268 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5269 IFF_ALLMULTI));
5270
5271 /*
5272 * Load in the correct multicast list now the flags have changed.
5273 */
5274
5275 if ((old_flags ^ flags) & IFF_MULTICAST)
5276 dev_change_rx_flags(dev, IFF_MULTICAST);
5277
5278 dev_set_rx_mode(dev);
5279
5280 /*
5281 * Have we downed the interface? We handle IFF_UP ourselves
5282 * according to user attempts to set it, rather than blindly
5283 * setting it.
5284 */
5285
5286 ret = 0;
5287 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
5288 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
5289
5290 if (!ret)
5291 dev_set_rx_mode(dev);
5292 }
5293
5294 if ((flags ^ dev->gflags) & IFF_PROMISC) {
5295 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5296 unsigned int old_flags = dev->flags;
5297
5298 dev->gflags ^= IFF_PROMISC;
5299
5300 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5301 if (dev->flags != old_flags)
5302 dev_set_rx_mode(dev);
5303 }
5304
5305 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5306 is important. Some (broken) drivers set IFF_PROMISC when
5307 IFF_ALLMULTI is requested, without asking us and without reporting it.
5308 */
5309 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
5310 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5311
5312 dev->gflags ^= IFF_ALLMULTI;
5313 __dev_set_allmulti(dev, inc, false);
5314 }
5315
5316 return ret;
5317 }
5318
5319 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5320 unsigned int gchanges)
5321 {
5322 unsigned int changes = dev->flags ^ old_flags;
5323
5324 if (gchanges)
5325 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
5326
5327 if (changes & IFF_UP) {
5328 if (dev->flags & IFF_UP)
5329 call_netdevice_notifiers(NETDEV_UP, dev);
5330 else
5331 call_netdevice_notifiers(NETDEV_DOWN, dev);
5332 }
5333
5334 if (dev->flags & IFF_UP &&
5335 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5336 struct netdev_notifier_change_info change_info;
5337
5338 change_info.flags_changed = changes;
5339 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5340 &change_info.info);
5341 }
5342 }
5343
5344 /**
5345 * dev_change_flags - change device settings
5346 * @dev: device
5347 * @flags: device state flags
5348 *
5349 * Change settings on device based state flags. The flags are
5350 * in the userspace exported format.
5351 */
5352 int dev_change_flags(struct net_device *dev, unsigned int flags)
5353 {
5354 int ret;
5355 unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
5356
5357 ret = __dev_change_flags(dev, flags);
5358 if (ret < 0)
5359 return ret;
5360
5361 changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
5362 __dev_notify_flags(dev, old_flags, changes);
5363 return ret;
5364 }
5365 EXPORT_SYMBOL(dev_change_flags);
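/*
 * Illustrative sketch (not part of this file): bringing an interface up from
 * kernel code using the userspace-visible flag format, under RTNL.  The
 * foo_* name is hypothetical.
 */
static int foo_bring_up(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
        rtnl_unlock();
        return err;
}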
5366
5367 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5368 {
5369 const struct net_device_ops *ops = dev->netdev_ops;
5370
5371 if (ops->ndo_change_mtu)
5372 return ops->ndo_change_mtu(dev, new_mtu);
5373
5374 dev->mtu = new_mtu;
5375 return 0;
5376 }
5377
5378 /**
5379 * dev_set_mtu - Change maximum transfer unit
5380 * @dev: device
5381 * @new_mtu: new transfer unit
5382 *
5383 * Change the maximum transfer size of the network device.
5384 */
5385 int dev_set_mtu(struct net_device *dev, int new_mtu)
5386 {
5387 int err, orig_mtu;
5388
5389 if (new_mtu == dev->mtu)
5390 return 0;
5391
5392 /* MTU must be positive. */
5393 if (new_mtu < 0)
5394 return -EINVAL;
5395
5396 if (!netif_device_present(dev))
5397 return -ENODEV;
5398
5399 err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5400 err = notifier_to_errno(err);
5401 if (err)
5402 return err;
5403
5404 orig_mtu = dev->mtu;
5405 err = __dev_set_mtu(dev, new_mtu);
5406
5407 if (!err) {
5408 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5409 err = notifier_to_errno(err);
5410 if (err) {
5411 /* setting mtu back and notifying everyone again,
5412 * so that they have a chance to revert changes.
5413 */
5414 __dev_set_mtu(dev, orig_mtu);
5415 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5416 }
5417 }
5418 return err;
5419 }
5420 EXPORT_SYMBOL(dev_set_mtu);
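/*
 * Illustrative sketch (not part of this file): an upper device (e.g. a VLAN)
 * propagating its lower device's MTU to itself.  Callers hold RTNL; the
 * foo_* name is hypothetical.
 */
static int foo_sync_mtu(struct net_device *upper, struct net_device *lower)
{
        ASSERT_RTNL();
        return dev_set_mtu(upper, lower->mtu);
}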
5421
5422 /**
5423 * dev_set_group - Change group this device belongs to
5424 * @dev: device
5425 * @new_group: group this device should belong to
5426 */
5427 void dev_set_group(struct net_device *dev, int new_group)
5428 {
5429 dev->group = new_group;
5430 }
5431 EXPORT_SYMBOL(dev_set_group);
5432
5433 /**
5434 * dev_set_mac_address - Change Media Access Control Address
5435 * @dev: device
5436 * @sa: new address
5437 *
5438 * Change the hardware (MAC) address of the device
5439 */
5440 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5441 {
5442 const struct net_device_ops *ops = dev->netdev_ops;
5443 int err;
5444
5445 if (!ops->ndo_set_mac_address)
5446 return -EOPNOTSUPP;
5447 if (sa->sa_family != dev->type)
5448 return -EINVAL;
5449 if (!netif_device_present(dev))
5450 return -ENODEV;
5451 err = ops->ndo_set_mac_address(dev, sa);
5452 if (err)
5453 return err;
5454 dev->addr_assign_type = NET_ADDR_SET;
5455 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5456 add_device_randomness(dev->dev_addr, dev->addr_len);
5457 return 0;
5458 }
5459 EXPORT_SYMBOL(dev_set_mac_address);
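/*
 * Illustrative sketch (not part of this file): setting a MAC address from
 * kernel code; the sockaddr family must match dev->type (ARPHRD_ETHER for
 * Ethernet).  The foo_* name is hypothetical.
 */
static int foo_set_mac(struct net_device *dev, const u8 *addr)
{
        struct sockaddr sa;
        int err;

        sa.sa_family = dev->type;
        memcpy(sa.sa_data, addr, dev->addr_len);

        rtnl_lock();
        err = dev_set_mac_address(dev, &sa);
        rtnl_unlock();
        return err;
}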
5460
5461 /**
5462 * dev_change_carrier - Change device carrier
5463 * @dev: device
5464 * @new_carrier: new value
5465 *
5466 * Change device carrier
5467 */
5468 int dev_change_carrier(struct net_device *dev, bool new_carrier)
5469 {
5470 const struct net_device_ops *ops = dev->netdev_ops;
5471
5472 if (!ops->ndo_change_carrier)
5473 return -EOPNOTSUPP;
5474 if (!netif_device_present(dev))
5475 return -ENODEV;
5476 return ops->ndo_change_carrier(dev, new_carrier);
5477 }
5478 EXPORT_SYMBOL(dev_change_carrier);
5479
5480 /**
5481 * dev_get_phys_port_id - Get device physical port ID
5482 * @dev: device
5483 * @ppid: port ID
5484 *
5485 * Get device physical port ID
5486 */
5487 int dev_get_phys_port_id(struct net_device *dev,
5488 struct netdev_phys_port_id *ppid)
5489 {
5490 const struct net_device_ops *ops = dev->netdev_ops;
5491
5492 if (!ops->ndo_get_phys_port_id)
5493 return -EOPNOTSUPP;
5494 return ops->ndo_get_phys_port_id(dev, ppid);
5495 }
5496 EXPORT_SYMBOL(dev_get_phys_port_id);
5497
5498 /**
5499 * dev_new_index - allocate an ifindex
5500 * @net: the applicable net namespace
5501 *
5502 * Returns a suitable unique value for a new device interface
5503 * number. The caller must hold the rtnl semaphore or the
5504 * dev_base_lock to be sure it remains unique.
5505 */
5506 static int dev_new_index(struct net *net)
5507 {
5508 int ifindex = net->ifindex;
5509 for (;;) {
5510 if (++ifindex <= 0)
5511 ifindex = 1;
5512 if (!__dev_get_by_index(net, ifindex))
5513 return net->ifindex = ifindex;
5514 }
5515 }
5516
5517 /* Delayed registration/unregisteration */
5518 static LIST_HEAD(net_todo_list);
5519 static DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5520
5521 static void net_set_todo(struct net_device *dev)
5522 {
5523 list_add_tail(&dev->todo_list, &net_todo_list);
5524 dev_net(dev)->dev_unreg_count++;
5525 }
5526
5527 static void rollback_registered_many(struct list_head *head)
5528 {
5529 struct net_device *dev, *tmp;
5530 LIST_HEAD(close_head);
5531
5532 BUG_ON(dev_boot_phase);
5533 ASSERT_RTNL();
5534
5535 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5536 /* Some devices call this without ever having been
5537 * registered, as part of initialization unwind. Remove
5538 * those devices and proceed with the remaining.
5539 */
5540 if (dev->reg_state == NETREG_UNINITIALIZED) {
5541 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5542 dev->name, dev);
5543
5544 WARN_ON(1);
5545 list_del(&dev->unreg_list);
5546 continue;
5547 }
5548 dev->dismantle = true;
5549 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5550 }
5551
5552 /* If device is running, close it first. */
5553 list_for_each_entry(dev, head, unreg_list)
5554 list_add_tail(&dev->close_list, &close_head);
5555 dev_close_many(&close_head);
5556
5557 list_for_each_entry(dev, head, unreg_list) {
5558 /* And unlink it from device chain. */
5559 unlist_netdevice(dev);
5560
5561 dev->reg_state = NETREG_UNREGISTERING;
5562 }
5563
5564 synchronize_net();
5565
5566 list_for_each_entry(dev, head, unreg_list) {
5567 /* Shutdown queueing discipline. */
5568 dev_shutdown(dev);
5569
5570
5571 /* Notify protocols that we are about to destroy
5572 this device. They should clean up all of their state.
5573 */
5574 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5575
5576 if (!dev->rtnl_link_ops ||
5577 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5578 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
5579
5580 /*
5581 * Flush the unicast and multicast chains
5582 */
5583 dev_uc_flush(dev);
5584 dev_mc_flush(dev);
5585
5586 if (dev->netdev_ops->ndo_uninit)
5587 dev->netdev_ops->ndo_uninit(dev);
5588
5589 /* The notifier chain MUST have detached all upper devices from us. */
5590 WARN_ON(netdev_has_any_upper_dev(dev));
5591
5592 /* Remove entries from kobject tree */
5593 netdev_unregister_kobject(dev);
5594 #ifdef CONFIG_XPS
5595 /* Remove XPS queueing entries */
5596 netif_reset_xps_queues_gt(dev, 0);
5597 #endif
5598 }
5599
5600 synchronize_net();
5601
5602 list_for_each_entry(dev, head, unreg_list)
5603 dev_put(dev);
5604 }
5605
5606 static void rollback_registered(struct net_device *dev)
5607 {
5608 LIST_HEAD(single);
5609
5610 list_add(&dev->unreg_list, &single);
5611 rollback_registered_many(&single);
5612 list_del(&single);
5613 }
5614
5615 static netdev_features_t netdev_fix_features(struct net_device *dev,
5616 netdev_features_t features)
5617 {
5618 /* Fix illegal checksum combinations */
5619 if ((features & NETIF_F_HW_CSUM) &&
5620 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5621 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5622 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5623 }
5624
5625 /* TSO requires that SG is present as well. */
5626 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5627 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5628 features &= ~NETIF_F_ALL_TSO;
5629 }
5630
5631 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
5632 !(features & NETIF_F_IP_CSUM)) {
5633 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
5634 features &= ~NETIF_F_TSO;
5635 features &= ~NETIF_F_TSO_ECN;
5636 }
5637
5638 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
5639 !(features & NETIF_F_IPV6_CSUM)) {
5640 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
5641 features &= ~NETIF_F_TSO6;
5642 }
5643
5644 /* TSO ECN requires that TSO is present as well. */
5645 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5646 features &= ~NETIF_F_TSO_ECN;
5647
5648 /* Software GSO depends on SG. */
5649 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5650 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5651 features &= ~NETIF_F_GSO;
5652 }
5653
5654 /* UFO needs SG and checksumming */
5655 if (features & NETIF_F_UFO) {
5656 /* maybe split UFO into V4 and V6? */
5657 if (!((features & NETIF_F_GEN_CSUM) ||
5658 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5659 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5660 netdev_dbg(dev,
5661 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5662 features &= ~NETIF_F_UFO;
5663 }
5664
5665 if (!(features & NETIF_F_SG)) {
5666 netdev_dbg(dev,
5667 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5668 features &= ~NETIF_F_UFO;
5669 }
5670 }
5671
5672 return features;
5673 }
5674
5675 int __netdev_update_features(struct net_device *dev)
5676 {
5677 netdev_features_t features;
5678 int err = 0;
5679
5680 ASSERT_RTNL();
5681
5682 features = netdev_get_wanted_features(dev);
5683
5684 if (dev->netdev_ops->ndo_fix_features)
5685 features = dev->netdev_ops->ndo_fix_features(dev, features);
5686
5687 /* driver might be less strict about feature dependencies */
5688 features = netdev_fix_features(dev, features);
5689
5690 if (dev->features == features)
5691 return 0;
5692
5693 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5694 &dev->features, &features);
5695
5696 if (dev->netdev_ops->ndo_set_features)
5697 err = dev->netdev_ops->ndo_set_features(dev, features);
5698
5699 if (unlikely(err < 0)) {
5700 netdev_err(dev,
5701 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5702 err, &features, &dev->features);
5703 return -1;
5704 }
5705
5706 if (!err)
5707 dev->features = features;
5708
5709 return 1;
5710 }
5711
5712 /**
5713 * netdev_update_features - recalculate device features
5714 * @dev: the device to check
5715 *
5716 * Recalculate dev->features set and send notifications if it
5717 * has changed. Should be called after driver or hardware dependent
5718 * conditions might have changed that influence the features.
5719 */
5720 void netdev_update_features(struct net_device *dev)
5721 {
5722 if (__netdev_update_features(dev))
5723 netdev_features_change(dev);
5724 }
5725 EXPORT_SYMBOL(netdev_update_features);
5726
5727 /**
5728 * netdev_change_features - recalculate device features
5729 * @dev: the device to check
5730 *
5731 * Recalculate dev->features set and send notifications even
5732 * if they have not changed. Should be called instead of
5733 * netdev_update_features() if also dev->vlan_features might
5734 * have changed to allow the changes to be propagated to stacked
5735 * VLAN devices.
5736 */
5737 void netdev_change_features(struct net_device *dev)
5738 {
5739 __netdev_update_features(dev);
5740 netdev_features_change(dev);
5741 }
5742 EXPORT_SYMBOL(netdev_change_features);
5743
5744 /**
5745 * netif_stacked_transfer_operstate - transfer operstate
5746 * @rootdev: the root or lower level device to transfer state from
5747 * @dev: the device to transfer operstate to
5748 *
5749 * Transfer operational state from root to device. This is normally
5750 * called when a stacking relationship exists between the root
5751 * device and the device (a leaf device).
5752 */
5753 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5754 struct net_device *dev)
5755 {
5756 if (rootdev->operstate == IF_OPER_DORMANT)
5757 netif_dormant_on(dev);
5758 else
5759 netif_dormant_off(dev);
5760
5761 if (netif_carrier_ok(rootdev)) {
5762 if (!netif_carrier_ok(dev))
5763 netif_carrier_on(dev);
5764 } else {
5765 if (netif_carrier_ok(dev))
5766 netif_carrier_off(dev);
5767 }
5768 }
5769 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
5770
5771 #ifdef CONFIG_SYSFS
5772 static int netif_alloc_rx_queues(struct net_device *dev)
5773 {
5774 unsigned int i, count = dev->num_rx_queues;
5775 struct netdev_rx_queue *rx;
5776
5777 BUG_ON(count < 1);
5778
5779 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5780 if (!rx)
5781 return -ENOMEM;
5782
5783 dev->_rx = rx;
5784
5785 for (i = 0; i < count; i++)
5786 rx[i].dev = dev;
5787 return 0;
5788 }
5789 #endif
5790
5791 static void netdev_init_one_queue(struct net_device *dev,
5792 struct netdev_queue *queue, void *_unused)
5793 {
5794 /* Initialize queue lock */
5795 spin_lock_init(&queue->_xmit_lock);
5796 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5797 queue->xmit_lock_owner = -1;
5798 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5799 queue->dev = dev;
5800 #ifdef CONFIG_BQL
5801 dql_init(&queue->dql, HZ);
5802 #endif
5803 }
5804
5805 static void netif_free_tx_queues(struct net_device *dev)
5806 {
5807 if (is_vmalloc_addr(dev->_tx))
5808 vfree(dev->_tx);
5809 else
5810 kfree(dev->_tx);
5811 }
5812
5813 static int netif_alloc_netdev_queues(struct net_device *dev)
5814 {
5815 unsigned int count = dev->num_tx_queues;
5816 struct netdev_queue *tx;
5817 size_t sz = count * sizeof(*tx);
5818
5819 BUG_ON(count < 1 || count > 0xffff);
5820
5821 tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
5822 if (!tx) {
5823 tx = vzalloc(sz);
5824 if (!tx)
5825 return -ENOMEM;
5826 }
5827 dev->_tx = tx;
5828
5829 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5830 spin_lock_init(&dev->tx_global_lock);
5831
5832 return 0;
5833 }
5834
5835 /**
5836 * register_netdevice - register a network device
5837 * @dev: device to register
5838 *
5839 * Take a completed network device structure and add it to the kernel
5840 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5841 * chain. 0 is returned on success. A negative errno code is returned
5842 * on a failure to set up the device, or if the name is a duplicate.
5843 *
5844 * Callers must hold the rtnl semaphore. You may want
5845 * register_netdev() instead of this.
5846 *
5847 * BUGS:
5848 * The locking appears insufficient to guarantee two parallel registers
5849 * will not get the same name.
5850 */
5851
5852 int register_netdevice(struct net_device *dev)
5853 {
5854 int ret;
5855 struct net *net = dev_net(dev);
5856
5857 BUG_ON(dev_boot_phase);
5858 ASSERT_RTNL();
5859
5860 might_sleep();
5861
5862 /* When net_devices are persistent, this will be fatal. */
5863 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5864 BUG_ON(!net);
5865
5866 spin_lock_init(&dev->addr_list_lock);
5867 netdev_set_addr_lockdep_class(dev);
5868
5869 dev->iflink = -1;
5870
5871 ret = dev_get_valid_name(net, dev, dev->name);
5872 if (ret < 0)
5873 goto out;
5874
5875 /* Init, if this function is available */
5876 if (dev->netdev_ops->ndo_init) {
5877 ret = dev->netdev_ops->ndo_init(dev);
5878 if (ret) {
5879 if (ret > 0)
5880 ret = -EIO;
5881 goto out;
5882 }
5883 }
5884
5885 if (((dev->hw_features | dev->features) &
5886 NETIF_F_HW_VLAN_CTAG_FILTER) &&
5887 (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
5888 !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
5889 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
5890 ret = -EINVAL;
5891 goto err_uninit;
5892 }
5893
5894 ret = -EBUSY;
5895 if (!dev->ifindex)
5896 dev->ifindex = dev_new_index(net);
5897 else if (__dev_get_by_index(net, dev->ifindex))
5898 goto err_uninit;
5899
5900 if (dev->iflink == -1)
5901 dev->iflink = dev->ifindex;
5902
5903 /* Transfer changeable features to wanted_features and enable
5904 * software offloads (GSO and GRO).
5905 */
5906 dev->hw_features |= NETIF_F_SOFT_FEATURES;
5907 dev->features |= NETIF_F_SOFT_FEATURES;
5908 dev->wanted_features = dev->features & dev->hw_features;
5909
5910 if (!(dev->flags & IFF_LOOPBACK)) {
5911 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5912 }
5913
5914 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5915 */
5916 dev->vlan_features |= NETIF_F_HIGHDMA;
5917
5918 /* Make NETIF_F_SG inheritable to tunnel devices.
5919 */
5920 dev->hw_enc_features |= NETIF_F_SG;
5921
5922 /* Make NETIF_F_SG inheritable to MPLS.
5923 */
5924 dev->mpls_features |= NETIF_F_SG;
5925
5926 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5927 ret = notifier_to_errno(ret);
5928 if (ret)
5929 goto err_uninit;
5930
5931 ret = netdev_register_kobject(dev);
5932 if (ret)
5933 goto err_uninit;
5934 dev->reg_state = NETREG_REGISTERED;
5935
5936 __netdev_update_features(dev);
5937
5938 /*
5939 * Default initial state at registry is that the
5940 * device is present.
5941 */
5942
5943 set_bit(__LINK_STATE_PRESENT, &dev->state);
5944
5945 linkwatch_init_dev(dev);
5946
5947 dev_init_scheduler(dev);
5948 dev_hold(dev);
5949 list_netdevice(dev);
5950 add_device_randomness(dev->dev_addr, dev->addr_len);
5951
5952 /* If the device has a permanent device address, the driver should
5953 * set dev_addr, and addr_assign_type should be set to
5954 * NET_ADDR_PERM (the default value).
5955 */
5956 if (dev->addr_assign_type == NET_ADDR_PERM)
5957 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5958
5959 /* Notify protocols that a new device appeared. */
5960 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5961 ret = notifier_to_errno(ret);
5962 if (ret) {
5963 rollback_registered(dev);
5964 dev->reg_state = NETREG_UNREGISTERED;
5965 }
5966 /*
5967 * Prevent userspace races by waiting until the network
5968 * device is fully setup before sending notifications.
5969 */
5970 if (!dev->rtnl_link_ops ||
5971 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5972 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
5973
5974 out:
5975 return ret;
5976
5977 err_uninit:
5978 if (dev->netdev_ops->ndo_uninit)
5979 dev->netdev_ops->ndo_uninit(dev);
5980 goto out;
5981 }
5982 EXPORT_SYMBOL(register_netdevice);
5983
5984 /**
5985 * init_dummy_netdev - init a dummy network device for NAPI
5986 * @dev: device to init
5987 *
5988 * This takes a network device structure and initializes the minimum
5989 * number of fields so it can be used to schedule NAPI polls without
5990 * registering a full blown interface. This is to be used by drivers
5991 * that need to tie several hardware interfaces to a single NAPI
5992 * poll scheduler due to HW limitations.
5993 */
5994 int init_dummy_netdev(struct net_device *dev)
5995 {
5996 /* Clear everything. Note we don't initialize spinlocks
5997 * as they aren't supposed to be taken by any of the
5998 * NAPI code and this dummy netdev is supposed to be
5999 * used only for NAPI polls
6000 */
6001 memset(dev, 0, sizeof(struct net_device));
6002
6003 /* make sure we BUG if trying to hit standard
6004 * register/unregister code path
6005 */
6006 dev->reg_state = NETREG_DUMMY;
6007
6008 /* NAPI wants this */
6009 INIT_LIST_HEAD(&dev->napi_list);
6010
6011 /* a dummy interface is started by default */
6012 set_bit(__LINK_STATE_PRESENT, &dev->state);
6013 set_bit(__LINK_STATE_START, &dev->state);
6014
6015 /* Note : We don't allocate pcpu_refcnt for dummy devices,
6016 * because users of this 'device' don't need to change
6017 * its refcount.
6018 */
6019
6020 return 0;
6021 }
6022 EXPORT_SYMBOL_GPL(init_dummy_netdev);
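
/*
 * Illustrative sketch, not part of dev.c: the pattern described in the
 * comment above, where a driver with one interrupt source but several
 * hardware ports hangs its NAPI context off a dummy netdev.  The
 * example_hw structure and example_hw_init() are hypothetical; only
 * init_dummy_netdev(), netif_napi_add() and napi_enable() are real.
 */
struct example_hw {
	struct net_device napi_dev;	/* dummy device, never registered */
	struct napi_struct napi;
};

static void example_hw_init(struct example_hw *hw,
			    int (*poll)(struct napi_struct *, int))
{
	init_dummy_netdev(&hw->napi_dev);
	/* weight 64 is the conventional default for NAPI polling */
	netif_napi_add(&hw->napi_dev, &hw->napi, poll, 64);
	napi_enable(&hw->napi);
}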
6023
6024
6025 /**
6026 * register_netdev - register a network device
6027 * @dev: device to register
6028 *
6029 * Take a completed network device structure and add it to the kernel
6030 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6031 * chain. 0 is returned on success. A negative errno code is returned
6032 * on a failure to set up the device, or if the name is a duplicate.
6033 *
6034 * This is a wrapper around register_netdevice that takes the rtnl semaphore
6035 * and expands the device name if you passed a format string to
6036 * alloc_netdev.
6037 */
6038 int register_netdev(struct net_device *dev)
6039 {
6040 int err;
6041
6042 rtnl_lock();
6043 err = register_netdevice(dev);
6044 rtnl_unlock();
6045 return err;
6046 }
6047 EXPORT_SYMBOL(register_netdev);
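
/*
 * Illustrative sketch, not part of dev.c: how a driver typically uses
 * register_netdev() at probe time.  The "example%d" name, example_priv
 * and example_setup() are hypothetical; alloc_netdev(), ether_setup(),
 * register_netdev() and free_netdev() are the real entry points.
 */
struct example_priv {
	int link_up;
};

static void example_setup(struct net_device *dev)
{
	ether_setup(dev);		/* sane Ethernet defaults */
}

static int example_create(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev(sizeof(struct example_priv), "example%d",
			   example_setup);
	if (!dev)
		return -ENOMEM;

	err = register_netdev(dev);	/* takes rtnl, expands "%d" */
	if (err)
		free_netdev(dev);
	return err;
}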
6048
6049 int netdev_refcnt_read(const struct net_device *dev)
6050 {
6051 int i, refcnt = 0;
6052
6053 for_each_possible_cpu(i)
6054 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6055 return refcnt;
6056 }
6057 EXPORT_SYMBOL(netdev_refcnt_read);
6058
6059 /**
6060 * netdev_wait_allrefs - wait until all references are gone.
6061 * @dev: target net_device
6062 *
6063 * This is called when unregistering network devices.
6064 *
6065 * Any protocol or device that holds a reference should register
6066 * for netdevice notification, and clean up and put back the
6067 * reference if they receive an UNREGISTER event.
6068 * We can get stuck here if buggy protocols don't correctly
6069 * call dev_put.
6070 */
6071 static void netdev_wait_allrefs(struct net_device *dev)
6072 {
6073 unsigned long rebroadcast_time, warning_time;
6074 int refcnt;
6075
6076 linkwatch_forget_dev(dev);
6077
6078 rebroadcast_time = warning_time = jiffies;
6079 refcnt = netdev_refcnt_read(dev);
6080
6081 while (refcnt != 0) {
6082 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6083 rtnl_lock();
6084
6085 /* Rebroadcast unregister notification */
6086 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6087
6088 __rtnl_unlock();
6089 rcu_barrier();
6090 rtnl_lock();
6091
6092 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6093 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6094 &dev->state)) {
6095 /* We must not have linkwatch events
6096 * pending on unregister. If this
6097 * happens, we simply run the queue
6098 * unscheduled, resulting in a noop
6099 * for this device.
6100 */
6101 linkwatch_run_queue();
6102 }
6103
6104 __rtnl_unlock();
6105
6106 rebroadcast_time = jiffies;
6107 }
6108
6109 msleep(250);
6110
6111 refcnt = netdev_refcnt_read(dev);
6112
6113 if (time_after(jiffies, warning_time + 10 * HZ)) {
6114 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6115 dev->name, refcnt);
6116 warning_time = jiffies;
6117 }
6118 }
6119 }
6120
6121 /* The sequence is:
6122 *
6123 * rtnl_lock();
6124 * ...
6125 * register_netdevice(x1);
6126 * register_netdevice(x2);
6127 * ...
6128 * unregister_netdevice(y1);
6129 * unregister_netdevice(y2);
6130 * ...
6131 * rtnl_unlock();
6132 * free_netdev(y1);
6133 * free_netdev(y2);
6134 *
6135 * We are invoked by rtnl_unlock().
6136 * This allows us to deal with problems:
6137 * 1) We can delete sysfs objects which invoke hotplug
6138 * without deadlocking with linkwatch via keventd.
6139 * 2) Since we run with the RTNL semaphore not held, we can sleep
6140 * safely in order to wait for the netdev refcnt to drop to zero.
6141 *
6142 * We must not return until all unregister events added during
6143 * the interval the lock was held have been completed.
6144 */
6145 void netdev_run_todo(void)
6146 {
6147 struct list_head list;
6148
6149 /* Snapshot list, allow later requests */
6150 list_replace_init(&net_todo_list, &list);
6151
6152 __rtnl_unlock();
6153
6154
6155 /* Wait for rcu callbacks to finish before next phase */
6156 if (!list_empty(&list))
6157 rcu_barrier();
6158
6159 while (!list_empty(&list)) {
6160 struct net_device *dev
6161 = list_first_entry(&list, struct net_device, todo_list);
6162 list_del(&dev->todo_list);
6163
6164 rtnl_lock();
6165 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6166 __rtnl_unlock();
6167
6168 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
6169 pr_err("network todo '%s' but state %d\n",
6170 dev->name, dev->reg_state);
6171 dump_stack();
6172 continue;
6173 }
6174
6175 dev->reg_state = NETREG_UNREGISTERED;
6176
6177 on_each_cpu(flush_backlog, dev, 1);
6178
6179 netdev_wait_allrefs(dev);
6180
6181 /* paranoia */
6182 BUG_ON(netdev_refcnt_read(dev));
6183 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6184 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6185 WARN_ON(dev->dn_ptr);
6186
6187 if (dev->destructor)
6188 dev->destructor(dev);
6189
6190 /* Report a network device has been unregistered */
6191 rtnl_lock();
6192 dev_net(dev)->dev_unreg_count--;
6193 __rtnl_unlock();
6194 wake_up(&netdev_unregistering_wq);
6195
6196 /* Free network device */
6197 kobject_put(&dev->dev.kobj);
6198 }
6199 }
6200
6201 /* Convert net_device_stats to rtnl_link_stats64. They have the same
6202 * fields in the same order, with only the type differing.
6203 */
6204 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6205 const struct net_device_stats *netdev_stats)
6206 {
6207 #if BITS_PER_LONG == 64
6208 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6209 memcpy(stats64, netdev_stats, sizeof(*stats64));
6210 #else
6211 size_t i, n = sizeof(*stats64) / sizeof(u64);
6212 const unsigned long *src = (const unsigned long *)netdev_stats;
6213 u64 *dst = (u64 *)stats64;
6214
6215 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6216 sizeof(*stats64) / sizeof(u64));
6217 for (i = 0; i < n; i++)
6218 dst[i] = src[i];
6219 #endif
6220 }
6221 EXPORT_SYMBOL(netdev_stats_to_stats64);
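
/*
 * Illustrative sketch, not part of dev.c: a driver whose counters live in
 * dev->stats can implement ndo_get_stats64 as a thin wrapper around the
 * helper above.  example_get_stats64() is a hypothetical name.
 */
static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
	netdev_stats_to_stats64(storage, &dev->stats);
	return storage;
}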
6222
6223 /**
6224 * dev_get_stats - get network device statistics
6225 * @dev: device to get statistics from
6226 * @storage: place to store stats
6227 *
6228 * Get network statistics from device. Return @storage.
6229 * The device driver may provide its own method by setting
6230 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6231 * otherwise the internal statistics structure is used.
6232 */
6233 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6234 struct rtnl_link_stats64 *storage)
6235 {
6236 const struct net_device_ops *ops = dev->netdev_ops;
6237
6238 if (ops->ndo_get_stats64) {
6239 memset(storage, 0, sizeof(*storage));
6240 ops->ndo_get_stats64(dev, storage);
6241 } else if (ops->ndo_get_stats) {
6242 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6243 } else {
6244 netdev_stats_to_stats64(storage, &dev->stats);
6245 }
6246 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6247 return storage;
6248 }
6249 EXPORT_SYMBOL(dev_get_stats);
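
/*
 * Illustrative sketch, not part of dev.c: a caller's view of
 * dev_get_stats().  The caller supplies the storage; the core fills it
 * from ndo_get_stats64, ndo_get_stats or dev->stats as described above.
 */
static u64 example_rx_bytes(struct net_device *dev)
{
	struct rtnl_link_stats64 storage;

	dev_get_stats(dev, &storage);
	return storage.rx_bytes;
}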
6250
6251 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6252 {
6253 struct netdev_queue *queue = dev_ingress_queue(dev);
6254
6255 #ifdef CONFIG_NET_CLS_ACT
6256 if (queue)
6257 return queue;
6258 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6259 if (!queue)
6260 return NULL;
6261 netdev_init_one_queue(dev, queue, NULL);
6262 queue->qdisc = &noop_qdisc;
6263 queue->qdisc_sleeping = &noop_qdisc;
6264 rcu_assign_pointer(dev->ingress_queue, queue);
6265 #endif
6266 return queue;
6267 }
6268
6269 static const struct ethtool_ops default_ethtool_ops;
6270
6271 void netdev_set_default_ethtool_ops(struct net_device *dev,
6272 const struct ethtool_ops *ops)
6273 {
6274 if (dev->ethtool_ops == &default_ethtool_ops)
6275 dev->ethtool_ops = ops;
6276 }
6277 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6278
6279 void netdev_freemem(struct net_device *dev)
6280 {
6281 char *addr = (char *)dev - dev->padded;
6282
6283 if (is_vmalloc_addr(addr))
6284 vfree(addr);
6285 else
6286 kfree(addr);
6287 }
6288
6289 /**
6290 * alloc_netdev_mqs - allocate network device
6291 * @sizeof_priv: size of private data to allocate space for
6292 * @name: device name format string
6293 * @setup: callback to initialize device
6294 * @txqs: the number of TX subqueues to allocate
6295 * @rxqs: the number of RX subqueues to allocate
6296 *
6297 * Allocates a struct net_device with a private data area for driver use
6298 * and performs basic initialization. Also allocates subqueue structs
6299 * for each queue on the device.
6300 */
6301 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6302 void (*setup)(struct net_device *),
6303 unsigned int txqs, unsigned int rxqs)
6304 {
6305 struct net_device *dev;
6306 size_t alloc_size;
6307 struct net_device *p;
6308
6309 BUG_ON(strlen(name) >= sizeof(dev->name));
6310
6311 if (txqs < 1) {
6312 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6313 return NULL;
6314 }
6315
6316 #ifdef CONFIG_SYSFS
6317 if (rxqs < 1) {
6318 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6319 return NULL;
6320 }
6321 #endif
6322
6323 alloc_size = sizeof(struct net_device);
6324 if (sizeof_priv) {
6325 /* ensure 32-byte alignment of private area */
6326 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6327 alloc_size += sizeof_priv;
6328 }
6329 /* ensure 32-byte alignment of whole construct */
6330 alloc_size += NETDEV_ALIGN - 1;
6331
6332 p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6333 if (!p)
6334 p = vzalloc(alloc_size);
6335 if (!p)
6336 return NULL;
6337
6338 dev = PTR_ALIGN(p, NETDEV_ALIGN);
6339 dev->padded = (char *)dev - (char *)p;
6340
6341 dev->pcpu_refcnt = alloc_percpu(int);
6342 if (!dev->pcpu_refcnt)
6343 goto free_dev;
6344
6345 if (dev_addr_init(dev))
6346 goto free_pcpu;
6347
6348 dev_mc_init(dev);
6349 dev_uc_init(dev);
6350
6351 dev_net_set(dev, &init_net);
6352
6353 dev->gso_max_size = GSO_MAX_SIZE;
6354 dev->gso_max_segs = GSO_MAX_SEGS;
6355
6356 INIT_LIST_HEAD(&dev->napi_list);
6357 INIT_LIST_HEAD(&dev->unreg_list);
6358 INIT_LIST_HEAD(&dev->close_list);
6359 INIT_LIST_HEAD(&dev->link_watch_list);
6360 INIT_LIST_HEAD(&dev->adj_list.upper);
6361 INIT_LIST_HEAD(&dev->adj_list.lower);
6362 INIT_LIST_HEAD(&dev->all_adj_list.upper);
6363 INIT_LIST_HEAD(&dev->all_adj_list.lower);
6364 dev->priv_flags = IFF_XMIT_DST_RELEASE;
6365 setup(dev);
6366
6367 dev->num_tx_queues = txqs;
6368 dev->real_num_tx_queues = txqs;
6369 if (netif_alloc_netdev_queues(dev))
6370 goto free_all;
6371
6372 #ifdef CONFIG_SYSFS
6373 dev->num_rx_queues = rxqs;
6374 dev->real_num_rx_queues = rxqs;
6375 if (netif_alloc_rx_queues(dev))
6376 goto free_all;
6377 #endif
6378
6379 strcpy(dev->name, name);
6380 dev->group = INIT_NETDEV_GROUP;
6381 if (!dev->ethtool_ops)
6382 dev->ethtool_ops = &default_ethtool_ops;
6383 return dev;
6384
6385 free_all:
6386 free_netdev(dev);
6387 return NULL;
6388
6389 free_pcpu:
6390 free_percpu(dev->pcpu_refcnt);
6391 netif_free_tx_queues(dev);
6392 #ifdef CONFIG_SYSFS
6393 kfree(dev->_rx);
6394 #endif
6395
6396 free_dev:
6397 netdev_freemem(dev);
6398 return NULL;
6399 }
6400 EXPORT_SYMBOL(alloc_netdev_mqs);
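
/*
 * Illustrative sketch, not part of dev.c: allocating a multiqueue
 * Ethernet device.  alloc_etherdev_mqs() is the usual wrapper that
 * passes ether_setup() to alloc_netdev_mqs(); the private size and the
 * queue counts below are arbitrary example values.
 */
static struct net_device *example_alloc(void)
{
	/* 512 bytes of private data, 8 TX queues, 8 RX queues */
	return alloc_etherdev_mqs(512, 8, 8);
}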
6401
6402 /**
6403 * free_netdev - free network device
6404 * @dev: device
6405 *
6406 * This function does the last stage of destroying an allocated device
6407 * interface. The reference to the device object is released.
6408 * If this is the last reference then it will be freed.
6409 */
6410 void free_netdev(struct net_device *dev)
6411 {
6412 struct napi_struct *p, *n;
6413
6414 release_net(dev_net(dev));
6415
6416 netif_free_tx_queues(dev);
6417 #ifdef CONFIG_SYSFS
6418 kfree(dev->_rx);
6419 #endif
6420
6421 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6422
6423 /* Flush device addresses */
6424 dev_addr_flush(dev);
6425
6426 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6427 netif_napi_del(p);
6428
6429 free_percpu(dev->pcpu_refcnt);
6430 dev->pcpu_refcnt = NULL;
6431
6432 /* Compatibility with error handling in drivers */
6433 if (dev->reg_state == NETREG_UNINITIALIZED) {
6434 netdev_freemem(dev);
6435 return;
6436 }
6437
6438 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6439 dev->reg_state = NETREG_RELEASED;
6440
6441 /* will free via device release */
6442 put_device(&dev->dev);
6443 }
6444 EXPORT_SYMBOL(free_netdev);
6445
6446 /**
6447 * synchronize_net - Synchronize with packet receive processing
6448 *
6449 * Wait for packets currently being received to be done.
6450 * Does not block later packets from starting.
6451 */
6452 void synchronize_net(void)
6453 {
6454 might_sleep();
6455 if (rtnl_is_locked())
6456 synchronize_rcu_expedited();
6457 else
6458 synchronize_rcu();
6459 }
6460 EXPORT_SYMBOL(synchronize_net);
6461
6462 /**
6463 * unregister_netdevice_queue - remove device from the kernel
6464 * @dev: device
6465 * @head: list
6466 *
6467 * This function shuts down a device interface and removes it
6468 * from the kernel tables.
6469 * If head is not NULL, the device is queued to be unregistered later.
6470 *
6471 * Callers must hold the rtnl semaphore. You may want
6472 * unregister_netdev() instead of this.
6473 */
6474
6475 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6476 {
6477 ASSERT_RTNL();
6478
6479 if (head) {
6480 list_move_tail(&dev->unreg_list, head);
6481 } else {
6482 rollback_registered(dev);
6483 /* Finish processing unregister after unlock */
6484 net_set_todo(dev);
6485 }
6486 }
6487 EXPORT_SYMBOL(unregister_netdevice_queue);
6488
6489 /**
6490 * unregister_netdevice_many - unregister many devices
6491 * @head: list of devices
6492 */
6493 void unregister_netdevice_many(struct list_head *head)
6494 {
6495 struct net_device *dev;
6496
6497 if (!list_empty(head)) {
6498 rollback_registered_many(head);
6499 list_for_each_entry(dev, head, unreg_list)
6500 net_set_todo(dev);
6501 }
6502 }
6503 EXPORT_SYMBOL(unregister_netdevice_many);
6504
6505 /**
6506 * unregister_netdev - remove device from the kernel
6507 * @dev: device
6508 *
6509 * This function shuts down a device interface and removes it
6510 * from the kernel tables.
6511 *
6512 * This is just a wrapper for unregister_netdevice that takes
6513 * the rtnl semaphore. In general you want to use this and not
6514 * unregister_netdevice.
6515 */
6516 void unregister_netdev(struct net_device *dev)
6517 {
6518 rtnl_lock();
6519 unregister_netdevice(dev);
6520 rtnl_unlock();
6521 }
6522 EXPORT_SYMBOL(unregister_netdev);
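
/*
 * Illustrative sketch, not part of dev.c: the usual teardown pairing.
 * unregister_netdev() takes the rtnl lock itself; free_netdev() must
 * only be called once unregistration has completed, as it is here.
 * example_destroy() is a hypothetical driver function.
 */
static void example_destroy(struct net_device *dev)
{
	unregister_netdev(dev);
	free_netdev(dev);
}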
6523
6524 /**
6525 * dev_change_net_namespace - move device to a different network namespace
6526 * @dev: device
6527 * @net: network namespace
6528 * @pat: If not NULL name pattern to try if the current device name
6529 * is already taken in the destination network namespace.
6530 *
6531 * This function shuts down a device interface and moves it
6532 * to a new network namespace. On success 0 is returned, on
6533 * a failure a negative errno code is returned.
6534 *
6535 * Callers must hold the rtnl semaphore.
6536 */
6537
6538 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6539 {
6540 int err;
6541
6542 ASSERT_RTNL();
6543
6544 /* Don't allow namespace local devices to be moved. */
6545 err = -EINVAL;
6546 if (dev->features & NETIF_F_NETNS_LOCAL)
6547 goto out;
6548
6549 /* Ensure the device has been registered */
6550 if (dev->reg_state != NETREG_REGISTERED)
6551 goto out;
6552
6553 /* Get out if there is nothing to do */
6554 err = 0;
6555 if (net_eq(dev_net(dev), net))
6556 goto out;
6557
6558 /* Pick the destination device name, and ensure
6559 * we can use it in the destination network namespace.
6560 */
6561 err = -EEXIST;
6562 if (__dev_get_by_name(net, dev->name)) {
6563 /* We get here if we can't use the current device name */
6564 if (!pat)
6565 goto out;
6566 if (dev_get_valid_name(net, dev, pat) < 0)
6567 goto out;
6568 }
6569
6570 /*
6571 * And now a mini version of register_netdevice and unregister_netdevice.
6572 */
6573
6574 /* If device is running close it first. */
6575 dev_close(dev);
6576
6577 /* And unlink it from device chain */
6578 err = -ENODEV;
6579 unlist_netdevice(dev);
6580
6581 synchronize_net();
6582
6583 /* Shutdown queueing discipline. */
6584 dev_shutdown(dev);
6585
6586 /* Notify protocols that we are about to destroy
6587 this device. They should clean all the things.
6588
6589 Note that dev->reg_state stays at NETREG_REGISTERED.
6590 This is wanted because this way 8021q and macvlan know
6591 the device is just moving and can keep their slaves up.
6592 */
6593 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6594 rcu_barrier();
6595 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6596 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
6597
6598 /*
6599 * Flush the unicast and multicast chains
6600 */
6601 dev_uc_flush(dev);
6602 dev_mc_flush(dev);
6603
6604 /* Send a netdev-removed uevent to the old namespace */
6605 kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
6606
6607 /* Actually switch the network namespace */
6608 dev_net_set(dev, net);
6609
6610 /* If there is an ifindex conflict assign a new one */
6611 if (__dev_get_by_index(net, dev->ifindex)) {
6612 int iflink = (dev->iflink == dev->ifindex);
6613 dev->ifindex = dev_new_index(net);
6614 if (iflink)
6615 dev->iflink = dev->ifindex;
6616 }
6617
6618 /* Send a netdev-add uevent to the new namespace */
6619 kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
6620
6621 /* Fixup kobjects */
6622 err = device_rename(&dev->dev, dev->name);
6623 WARN_ON(err);
6624
6625 /* Add the device back in the hashes */
6626 list_netdevice(dev);
6627
6628 /* Notify protocols that a new device appeared. */
6629 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6630
6631 /*
6632 * Prevent userspace races by waiting until the network
6633 * device is fully setup before sending notifications.
6634 */
6635 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6636
6637 synchronize_net();
6638 err = 0;
6639 out:
6640 return err;
6641 }
6642 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
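
/*
 * Illustrative sketch, not part of dev.c: callers of
 * dev_change_net_namespace() must hold the rtnl lock; a "%d" pattern
 * lets the core pick a free name in the target namespace if the current
 * name is taken.  example_move() is a hypothetical helper.
 */
static int example_move(struct net_device *dev, struct net *target)
{
	int err;

	rtnl_lock();
	err = dev_change_net_namespace(dev, target, "eth%d");
	rtnl_unlock();
	return err;
}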
6643
6644 static int dev_cpu_callback(struct notifier_block *nfb,
6645 unsigned long action,
6646 void *ocpu)
6647 {
6648 struct sk_buff **list_skb;
6649 struct sk_buff *skb;
6650 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6651 struct softnet_data *sd, *oldsd;
6652
6653 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6654 return NOTIFY_OK;
6655
6656 local_irq_disable();
6657 cpu = smp_processor_id();
6658 sd = &per_cpu(softnet_data, cpu);
6659 oldsd = &per_cpu(softnet_data, oldcpu);
6660
6661 /* Find end of our completion_queue. */
6662 list_skb = &sd->completion_queue;
6663 while (*list_skb)
6664 list_skb = &(*list_skb)->next;
6665 /* Append completion queue from offline CPU. */
6666 *list_skb = oldsd->completion_queue;
6667 oldsd->completion_queue = NULL;
6668
6669 /* Append output queue from offline CPU. */
6670 if (oldsd->output_queue) {
6671 *sd->output_queue_tailp = oldsd->output_queue;
6672 sd->output_queue_tailp = oldsd->output_queue_tailp;
6673 oldsd->output_queue = NULL;
6674 oldsd->output_queue_tailp = &oldsd->output_queue;
6675 }
6676 /* Append NAPI poll list from offline CPU. */
6677 if (!list_empty(&oldsd->poll_list)) {
6678 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6679 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6680 }
6681
6682 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6683 local_irq_enable();
6684
6685 /* Process offline CPU's input_pkt_queue */
6686 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6687 netif_rx_internal(skb);
6688 input_queue_head_incr(oldsd);
6689 }
6690 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6691 netif_rx_internal(skb);
6692 input_queue_head_incr(oldsd);
6693 }
6694
6695 return NOTIFY_OK;
6696 }
6697
6698
6699 /**
6700 * netdev_increment_features - increment feature set by one
6701 * @all: current feature set
6702 * @one: new feature set
6703 * @mask: mask feature set
6704 *
6705 * Computes a new feature set after adding a device with feature set
6706 * @one to the master device with current feature set @all. Will not
6707 * enable anything that is off in @mask. Returns the new feature set.
6708 */
6709 netdev_features_t netdev_increment_features(netdev_features_t all,
6710 netdev_features_t one, netdev_features_t mask)
6711 {
6712 if (mask & NETIF_F_GEN_CSUM)
6713 mask |= NETIF_F_ALL_CSUM;
6714 mask |= NETIF_F_VLAN_CHALLENGED;
6715
6716 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6717 all &= one | ~NETIF_F_ALL_FOR_ALL;
6718
6719 /* If one device supports hw checksumming, set for all. */
6720 if (all & NETIF_F_GEN_CSUM)
6721 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6722
6723 return all;
6724 }
6725 EXPORT_SYMBOL(netdev_increment_features);
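
/*
 * Illustrative sketch, not part of dev.c: an upper device such as a bond
 * or bridge folds one slave's feature set into its own with
 * netdev_increment_features().  The NETIF_F_ONE_FOR_ALL mask here is
 * only an example; real masters use their own feature masks.
 */
static netdev_features_t example_add_slave_features(netdev_features_t master,
						    const struct net_device *slave)
{
	return netdev_increment_features(master, slave->features,
					 NETIF_F_ONE_FOR_ALL);
}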
6726
6727 static struct hlist_head * __net_init netdev_create_hash(void)
6728 {
6729 int i;
6730 struct hlist_head *hash;
6731
6732 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6733 if (hash != NULL)
6734 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6735 INIT_HLIST_HEAD(&hash[i]);
6736
6737 return hash;
6738 }
6739
6740 /* Initialize per network namespace state */
6741 static int __net_init netdev_init(struct net *net)
6742 {
6743 if (net != &init_net)
6744 INIT_LIST_HEAD(&net->dev_base_head);
6745
6746 net->dev_name_head = netdev_create_hash();
6747 if (net->dev_name_head == NULL)
6748 goto err_name;
6749
6750 net->dev_index_head = netdev_create_hash();
6751 if (net->dev_index_head == NULL)
6752 goto err_idx;
6753
6754 return 0;
6755
6756 err_idx:
6757 kfree(net->dev_name_head);
6758 err_name:
6759 return -ENOMEM;
6760 }
6761
6762 /**
6763 * netdev_drivername - network driver for the device
6764 * @dev: network device
6765 *
6766 * Determine network driver for device.
6767 */
6768 const char *netdev_drivername(const struct net_device *dev)
6769 {
6770 const struct device_driver *driver;
6771 const struct device *parent;
6772 const char *empty = "";
6773
6774 parent = dev->dev.parent;
6775 if (!parent)
6776 return empty;
6777
6778 driver = parent->driver;
6779 if (driver && driver->name)
6780 return driver->name;
6781 return empty;
6782 }
6783
6784 static int __netdev_printk(const char *level, const struct net_device *dev,
6785 struct va_format *vaf)
6786 {
6787 int r;
6788
6789 if (dev && dev->dev.parent) {
6790 r = dev_printk_emit(level[1] - '0',
6791 dev->dev.parent,
6792 "%s %s %s: %pV",
6793 dev_driver_string(dev->dev.parent),
6794 dev_name(dev->dev.parent),
6795 netdev_name(dev), vaf);
6796 } else if (dev) {
6797 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6798 } else {
6799 r = printk("%s(NULL net_device): %pV", level, vaf);
6800 }
6801
6802 return r;
6803 }
6804
6805 int netdev_printk(const char *level, const struct net_device *dev,
6806 const char *format, ...)
6807 {
6808 struct va_format vaf;
6809 va_list args;
6810 int r;
6811
6812 va_start(args, format);
6813
6814 vaf.fmt = format;
6815 vaf.va = &args;
6816
6817 r = __netdev_printk(level, dev, &vaf);
6818
6819 va_end(args);
6820
6821 return r;
6822 }
6823 EXPORT_SYMBOL(netdev_printk);
6824
6825 #define define_netdev_printk_level(func, level) \
6826 int func(const struct net_device *dev, const char *fmt, ...) \
6827 { \
6828 int r; \
6829 struct va_format vaf; \
6830 va_list args; \
6831 \
6832 va_start(args, fmt); \
6833 \
6834 vaf.fmt = fmt; \
6835 vaf.va = &args; \
6836 \
6837 r = __netdev_printk(level, dev, &vaf); \
6838 \
6839 va_end(args); \
6840 \
6841 return r; \
6842 } \
6843 EXPORT_SYMBOL(func);
6844
6845 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6846 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6847 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6848 define_netdev_printk_level(netdev_err, KERN_ERR);
6849 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6850 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6851 define_netdev_printk_level(netdev_info, KERN_INFO);
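
/*
 * Illustrative sketch, not part of dev.c: the per-level helpers defined
 * above are used like dev_err()/dev_info() but prefix each message with
 * the driver and interface name.  example_report_link() is hypothetical.
 */
static void example_report_link(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link is up\n");
	else
		netdev_warn(dev, "link is down\n");
}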
6852
6853 static void __net_exit netdev_exit(struct net *net)
6854 {
6855 kfree(net->dev_name_head);
6856 kfree(net->dev_index_head);
6857 }
6858
6859 static struct pernet_operations __net_initdata netdev_net_ops = {
6860 .init = netdev_init,
6861 .exit = netdev_exit,
6862 };
6863
6864 static void __net_exit default_device_exit(struct net *net)
6865 {
6866 struct net_device *dev, *aux;
6867 /*
6868 * Push all migratable network devices back to the
6869 * initial network namespace
6870 */
6871 rtnl_lock();
6872 for_each_netdev_safe(net, dev, aux) {
6873 int err;
6874 char fb_name[IFNAMSIZ];
6875
6876 /* Ignore unmovable devices (e.g. loopback) */
6877 if (dev->features & NETIF_F_NETNS_LOCAL)
6878 continue;
6879
6880 /* Leave virtual devices for the generic cleanup */
6881 if (dev->rtnl_link_ops)
6882 continue;
6883
6884 /* Push remaining network devices to init_net */
6885 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6886 err = dev_change_net_namespace(dev, &init_net, fb_name);
6887 if (err) {
6888 pr_emerg("%s: failed to move %s to init_net: %d\n",
6889 __func__, dev->name, err);
6890 BUG();
6891 }
6892 }
6893 rtnl_unlock();
6894 }
6895
6896 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
6897 {
6898 /* Return with the rtnl_lock held when there are no network
6899 * devices unregistering in any network namespace in net_list.
6900 */
6901 struct net *net;
6902 bool unregistering;
6903 DEFINE_WAIT(wait);
6904
6905 for (;;) {
6906 prepare_to_wait(&netdev_unregistering_wq, &wait,
6907 TASK_UNINTERRUPTIBLE);
6908 unregistering = false;
6909 rtnl_lock();
6910 list_for_each_entry(net, net_list, exit_list) {
6911 if (net->dev_unreg_count > 0) {
6912 unregistering = true;
6913 break;
6914 }
6915 }
6916 if (!unregistering)
6917 break;
6918 __rtnl_unlock();
6919 schedule();
6920 }
6921 finish_wait(&netdev_unregistering_wq, &wait);
6922 }
6923
6924 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6925 {
6926 /* At exit all network devices must be removed from a network
6927 * namespace. Do this in the reverse order of registration.
6928 * Do this across as many network namespaces as possible to
6929 * improve batching efficiency.
6930 */
6931 struct net_device *dev;
6932 struct net *net;
6933 LIST_HEAD(dev_kill_list);
6934
6935 /* To prevent network device cleanup code from dereferencing
6936 * loopback devices or network devices that have been freed,
6937 * wait here for all pending unregistrations to complete
6938 * before unregistering the loopback device and allowing the
6939 * network namespace to be freed.
6940 *
6941 * The netdev todo list containing all network devices
6942 * unregistrations that happen in default_device_exit_batch
6943 * will run in the rtnl_unlock() at the end of
6944 * default_device_exit_batch.
6945 */
6946 rtnl_lock_unregistering(net_list);
6947 list_for_each_entry(net, net_list, exit_list) {
6948 for_each_netdev_reverse(net, dev) {
6949 if (dev->rtnl_link_ops)
6950 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6951 else
6952 unregister_netdevice_queue(dev, &dev_kill_list);
6953 }
6954 }
6955 unregister_netdevice_many(&dev_kill_list);
6956 list_del(&dev_kill_list);
6957 rtnl_unlock();
6958 }
6959
6960 static struct pernet_operations __net_initdata default_device_ops = {
6961 .exit = default_device_exit,
6962 .exit_batch = default_device_exit_batch,
6963 };
6964
6965 /*
6966 * Initialize the DEV module. At boot time this walks the device list and
6967 * unhooks any devices that fail to initialise (normally hardware not
6968 * present) and leaves us with a valid list of present and active devices.
6969 *
6970 */
6971
6972 /*
6973 * This is called single threaded during boot, so no need
6974 * to take the rtnl semaphore.
6975 */
6976 static int __init net_dev_init(void)
6977 {
6978 int i, rc = -ENOMEM;
6979
6980 BUG_ON(!dev_boot_phase);
6981
6982 if (dev_proc_init())
6983 goto out;
6984
6985 if (netdev_kobject_init())
6986 goto out;
6987
6988 INIT_LIST_HEAD(&ptype_all);
6989 for (i = 0; i < PTYPE_HASH_SIZE; i++)
6990 INIT_LIST_HEAD(&ptype_base[i]);
6991
6992 INIT_LIST_HEAD(&offload_base);
6993
6994 if (register_pernet_subsys(&netdev_net_ops))
6995 goto out;
6996
6997 /*
6998 * Initialise the packet receive queues.
6999 */
7000
7001 for_each_possible_cpu(i) {
7002 struct softnet_data *sd = &per_cpu(softnet_data, i);
7003
7004 skb_queue_head_init(&sd->input_pkt_queue);
7005 skb_queue_head_init(&sd->process_queue);
7006 INIT_LIST_HEAD(&sd->poll_list);
7007 sd->output_queue_tailp = &sd->output_queue;
7008 #ifdef CONFIG_RPS
7009 sd->csd.func = rps_trigger_softirq;
7010 sd->csd.info = sd;
7011 sd->cpu = i;
7012 #endif
7013
7014 sd->backlog.poll = process_backlog;
7015 sd->backlog.weight = weight_p;
7016 }
7017
7018 dev_boot_phase = 0;
7019
7020 /* The loopback device is special: if any other network device
7021 * is present in a network namespace, the loopback device must
7022 * be present. Since we now dynamically allocate and free the
7023 * loopback device, ensure this invariant is maintained by
7024 * keeping the loopback device as the first device on the
7025 * list of network devices. This ensures that the loopback device
7026 * is the first device that appears and the last network device
7027 * that disappears.
7028 */
7029 if (register_pernet_device(&loopback_net_ops))
7030 goto out;
7031
7032 if (register_pernet_device(&default_device_ops))
7033 goto out;
7034
7035 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7036 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
7037
7038 hotcpu_notifier(dev_cpu_callback, 0);
7039 dst_init();
7040 rc = 0;
7041 out:
7042 return rc;
7043 }
7044
7045 subsys_initcall(net_dev_init);