net: Convert net_ratelimit uses to net_<level>_ratelimited
net/core/dev.c
1 /*
2 * NET3 Protocol independent device support routines.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Derived from the non IP parts of dev.c 1.0.19
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 *
14 * Additional Authors:
15 * Florian la Roche <rzsfl@rz.uni-sb.de>
16 * Alan Cox <gw4pts@gw4pts.ampr.org>
17 * David Hinds <dahinds@users.sourceforge.net>
18 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
19 * Adam Sulmicki <adam@cfar.umd.edu>
20 * Pekka Riikonen <priikone@poesidon.pspt.fi>
21 *
22 * Changes:
23 * D.J. Barrow : Fixed bug where dev->refcnt gets set
24 * to 2 if register_netdev gets called
25 * before net_dev_init & also removed a
26 * few lines of code in the process.
27 * Alan Cox : device private ioctl copies fields back.
28 * Alan Cox : Transmit queue code does relevant
29 * stunts to keep the queue safe.
30 * Alan Cox : Fixed double lock.
31 * Alan Cox : Fixed promisc NULL pointer trap
32 * ???????? : Support the full private ioctl range
33 * Alan Cox : Moved ioctl permission check into
34 * drivers
35 * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
36 * Alan Cox : 100 backlog just doesn't cut it when
37 * you start doing multicast video 8)
38 * Alan Cox : Rewrote net_bh and list manager.
39 * Alan Cox : Fix ETH_P_ALL echoback lengths.
40 * Alan Cox : Took out transmit every packet pass
41 * Saved a few bytes in the ioctl handler
42 * Alan Cox : Network driver sets packet type before
43 * calling netif_rx. Saves a function
44 * call a packet.
45 * Alan Cox : Hashed net_bh()
46 * Richard Kooijman: Timestamp fixes.
47 * Alan Cox : Wrong field in SIOCGIFDSTADDR
48 * Alan Cox : Device lock protection.
49 * Alan Cox : Fixed nasty side effect of device close
50 * changes.
51 * Rudi Cilibrasi : Pass the right thing to
52 * set_mac_address()
53 * Dave Miller : 32bit quantity for the device lock to
54 * make it work out on a Sparc.
55 * Bjorn Ekwall : Added KERNELD hack.
56 * Alan Cox : Cleaned up the backlog initialise.
57 * Craig Metz : SIOCGIFCONF fix if space for under
58 * 1 device.
59 * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
60 * is no device open function.
61 * Andi Kleen : Fix error reporting for SIOCGIFCONF
62 * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
63 * Cyrus Durgin : Cleaned for KMOD
64 * Adam Sulmicki : Bug Fix : Network Device Unload
65 * A network device unload needs to purge
66 * the backlog queue.
67 * Paul Rusty Russell : SIOCSIFNAME
68 * Pekka Riikonen : Netdev boot-time settings code
69 * Andrew Morton : Make unregister_netdevice wait
70 * indefinitely on dev->refcnt
71 * J Hadi Salim : - Backlog queue sampling
72 * - netif_rx() feedback
73 */
74
75 #include <asm/uaccess.h>
76 #include <linux/bitops.h>
77 #include <linux/capability.h>
78 #include <linux/cpu.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/hash.h>
82 #include <linux/slab.h>
83 #include <linux/sched.h>
84 #include <linux/mutex.h>
85 #include <linux/string.h>
86 #include <linux/mm.h>
87 #include <linux/socket.h>
88 #include <linux/sockios.h>
89 #include <linux/errno.h>
90 #include <linux/interrupt.h>
91 #include <linux/if_ether.h>
92 #include <linux/netdevice.h>
93 #include <linux/etherdevice.h>
94 #include <linux/ethtool.h>
95 #include <linux/notifier.h>
96 #include <linux/skbuff.h>
97 #include <net/net_namespace.h>
98 #include <net/sock.h>
99 #include <linux/rtnetlink.h>
100 #include <linux/proc_fs.h>
101 #include <linux/seq_file.h>
102 #include <linux/stat.h>
103 #include <net/dst.h>
104 #include <net/pkt_sched.h>
105 #include <net/checksum.h>
106 #include <net/xfrm.h>
107 #include <linux/highmem.h>
108 #include <linux/init.h>
109 #include <linux/kmod.h>
110 #include <linux/module.h>
111 #include <linux/netpoll.h>
112 #include <linux/rcupdate.h>
113 #include <linux/delay.h>
114 #include <net/wext.h>
115 #include <net/iw_handler.h>
116 #include <asm/current.h>
117 #include <linux/audit.h>
118 #include <linux/dmaengine.h>
119 #include <linux/err.h>
120 #include <linux/ctype.h>
121 #include <linux/if_arp.h>
122 #include <linux/if_vlan.h>
123 #include <linux/ip.h>
124 #include <net/ip.h>
125 #include <linux/ipv6.h>
126 #include <linux/in.h>
127 #include <linux/jhash.h>
128 #include <linux/random.h>
129 #include <trace/events/napi.h>
130 #include <trace/events/net.h>
131 #include <trace/events/skb.h>
132 #include <linux/pci.h>
133 #include <linux/inetdevice.h>
134 #include <linux/cpu_rmap.h>
135 #include <linux/net_tstamp.h>
136 #include <linux/static_key.h>
137 #include <net/flow_keys.h>
138
139 #include "net-sysfs.h"
140
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
143
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
146
147 /*
148 * The list of packet types we will receive (as opposed to discard)
149 * and the routines to invoke.
150 *
151 * Why 16? Because with 16 the only overlap we get on a hash of the
152 * low nibble of the protocol value is RARP/SNAP/X.25.
153 *
154 * NOTE: That is no longer true with the addition of VLAN tags. Not
155 * sure which should go first, but I bet it won't make much
156 * difference if we are running VLANs. The good news is that
157 * this protocol won't be in the list unless compiled in, so
158 * the average user (w/out VLANs) will not be adversely affected.
159 * --BLG
160 *
161 * 0800 IP
162 * 8100 802.1Q VLAN
163 * 0001 802.3
164 * 0002 AX.25
165 * 0004 802.2
166 * 8035 RARP
167 * 0005 SNAP
168 * 0805 X.25
169 * 0806 ARP
170 * 8137 IPX
171 * 0009 Localtalk
172 * 86DD IPv6
173 */
174
175 #define PTYPE_HASH_SIZE (16)
176 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
177
178 static DEFINE_SPINLOCK(ptype_lock);
179 static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
180 static struct list_head ptype_all __read_mostly; /* Taps */
181
182 /*
183 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
184 * semaphore.
185 *
186 * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
187 *
188 * Writers must hold the rtnl semaphore while they loop through the
189 * dev_base_head list, and hold dev_base_lock for writing when they do the
190 * actual updates. This allows pure readers to access the list even
191 * while a writer is preparing to update it.
192 *
193 * To put it another way, dev_base_lock is held for writing only to
194 * protect against pure readers; the rtnl semaphore provides the
195 * protection against other writers.
196 *
197 * See, for example usages, register_netdevice() and
198 * unregister_netdevice(), which must be called with the rtnl
199 * semaphore held.
200 */
201 DEFINE_RWLOCK(dev_base_lock);
202 EXPORT_SYMBOL(dev_base_lock);
203
204 static inline void dev_base_seq_inc(struct net *net)
205 {
206 while (++net->dev_base_seq == 0);
207 }
208
209 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
210 {
211 unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
212
213 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
214 }
215
216 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
217 {
218 return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
219 }
220
221 static inline void rps_lock(struct softnet_data *sd)
222 {
223 #ifdef CONFIG_RPS
224 spin_lock(&sd->input_pkt_queue.lock);
225 #endif
226 }
227
228 static inline void rps_unlock(struct softnet_data *sd)
229 {
230 #ifdef CONFIG_RPS
231 spin_unlock(&sd->input_pkt_queue.lock);
232 #endif
233 }
234
235 /* Device list insertion */
236 static int list_netdevice(struct net_device *dev)
237 {
238 struct net *net = dev_net(dev);
239
240 ASSERT_RTNL();
241
242 write_lock_bh(&dev_base_lock);
243 list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
244 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
245 hlist_add_head_rcu(&dev->index_hlist,
246 dev_index_hash(net, dev->ifindex));
247 write_unlock_bh(&dev_base_lock);
248
249 dev_base_seq_inc(net);
250
251 return 0;
252 }
253
254 /* Device list removal
255 * caller must respect a RCU grace period before freeing/reusing dev
256 */
257 static void unlist_netdevice(struct net_device *dev)
258 {
259 ASSERT_RTNL();
260
261 /* Unlink dev from the device chain */
262 write_lock_bh(&dev_base_lock);
263 list_del_rcu(&dev->dev_list);
264 hlist_del_rcu(&dev->name_hlist);
265 hlist_del_rcu(&dev->index_hlist);
266 write_unlock_bh(&dev_base_lock);
267
268 dev_base_seq_inc(dev_net(dev));
269 }
270
271 /*
272 * Our notifier list
273 */
274
275 static RAW_NOTIFIER_HEAD(netdev_chain);
276
277 /*
278 * Device drivers call our routines to queue packets here. We empty the
279 * queue in the local softnet handler.
280 */
281
282 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
283 EXPORT_PER_CPU_SYMBOL(softnet_data);
284
285 #ifdef CONFIG_LOCKDEP
286 /*
287 * register_netdevice() inits txq->_xmit_lock and sets lockdep class
288 * according to dev->type
289 */
290 static const unsigned short netdev_lock_type[] =
291 {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
292 ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
293 ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
294 ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
295 ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
296 ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
297 ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
298 ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
299 ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
300 ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
301 ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
302 ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
303 ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
304 ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
305 ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
306 ARPHRD_VOID, ARPHRD_NONE};
307
308 static const char *const netdev_lock_name[] =
309 {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
310 "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
311 "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
312 "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
313 "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
314 "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
315 "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
316 "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
317 "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
318 "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
319 "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
320 "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
321 "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
322 "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
323 "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
324 "_xmit_VOID", "_xmit_NONE"};
325
326 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
327 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
328
329 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
330 {
331 int i;
332
333 for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
334 if (netdev_lock_type[i] == dev_type)
335 return i;
336 /* the last key is used by default */
337 return ARRAY_SIZE(netdev_lock_type) - 1;
338 }
339
340 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
341 unsigned short dev_type)
342 {
343 int i;
344
345 i = netdev_lock_pos(dev_type);
346 lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
347 netdev_lock_name[i]);
348 }
349
350 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
351 {
352 int i;
353
354 i = netdev_lock_pos(dev->type);
355 lockdep_set_class_and_name(&dev->addr_list_lock,
356 &netdev_addr_lock_key[i],
357 netdev_lock_name[i]);
358 }
359 #else
360 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
361 unsigned short dev_type)
362 {
363 }
364 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
365 {
366 }
367 #endif
368
369 /*******************************************************************************
370
371 Protocol management and registration routines
372
373 *******************************************************************************/
374
375 /*
376 * Add a protocol ID to the list. Now that the input handler is
377 * smarter we can dispense with all the messy stuff that used to be
378 * here.
379 *
380 * BEWARE!!! Protocol handlers, mangling input packets,
381 * MUST BE last in hash buckets and checking protocol handlers
382 * MUST start from promiscuous ptype_all chain in net_bh.
383 * It is true now, do not change it.
384 * Explanation follows: if protocol handler, mangling packet, will
385 * be the first on list, it is not able to sense, that packet
386 * is cloned and should be copied-on-write, so that it will
387 * change it and subsequent readers will get broken packet.
388 * --ANK (980803)
389 */
390
391 static inline struct list_head *ptype_head(const struct packet_type *pt)
392 {
393 if (pt->type == htons(ETH_P_ALL))
394 return &ptype_all;
395 else
396 return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
397 }
398
399 /**
400 * dev_add_pack - add packet handler
401 * @pt: packet type declaration
402 *
403 * Add a protocol handler to the networking stack. The passed &packet_type
404 * is linked into kernel lists and may not be freed until it has been
405 * removed from the kernel lists.
406 *
407 * This call does not sleep and therefore cannot
408 * guarantee that all CPUs in the middle of receiving packets
409 * will see the new packet type (until the next received packet).
410 */
411
412 void dev_add_pack(struct packet_type *pt)
413 {
414 struct list_head *head = ptype_head(pt);
415
416 spin_lock(&ptype_lock);
417 list_add_rcu(&pt->list, head);
418 spin_unlock(&ptype_lock);
419 }
420 EXPORT_SYMBOL(dev_add_pack);
421
422 /**
423 * __dev_remove_pack - remove packet handler
424 * @pt: packet type declaration
425 *
426 * Remove a protocol handler that was previously added to the kernel
427 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
428 * from the kernel lists and can be freed or reused once this function
429 * returns.
430 *
431 * The packet type might still be in use by receivers
432 * and must not be freed until after all the CPUs have gone
433 * through a quiescent state.
434 */
435 void __dev_remove_pack(struct packet_type *pt)
436 {
437 struct list_head *head = ptype_head(pt);
438 struct packet_type *pt1;
439
440 spin_lock(&ptype_lock);
441
442 list_for_each_entry(pt1, head, list) {
443 if (pt == pt1) {
444 list_del_rcu(&pt->list);
445 goto out;
446 }
447 }
448
449 pr_warn("dev_remove_pack: %p not found\n", pt);
450 out:
451 spin_unlock(&ptype_lock);
452 }
453 EXPORT_SYMBOL(__dev_remove_pack);
454
455 /**
456 * dev_remove_pack - remove packet handler
457 * @pt: packet type declaration
458 *
459 * Remove a protocol handler that was previously added to the kernel
460 * protocol handlers by dev_add_pack(). The passed &packet_type is removed
461 * from the kernel lists and can be freed or reused once this function
462 * returns.
463 *
464 * This call sleeps to guarantee that no CPU is looking at the packet
465 * type after return.
466 */
467 void dev_remove_pack(struct packet_type *pt)
468 {
469 __dev_remove_pack(pt);
470
471 synchronize_net();
472 }
473 EXPORT_SYMBOL(dev_remove_pack);
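/*
 * Example usage (editor's illustrative sketch, not part of this file, hence
 * guarded out): a minimal module that taps frames of a hypothetical
 * experimental ethertype with dev_add_pack() and unhooks the handler with
 * dev_remove_pack(). All names and the ethertype are illustrative.
 */
#if 0
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	/* The handler owns the skb and must free (or forward) it. */
	pr_info("example: %u byte frame on %s\n", skb->len, dev->name);
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
	.type = cpu_to_be16(0x88b5),	/* local experimental ethertype */
	.func = example_rcv,		/* .dev left NULL: match any device */
};

static int __init example_init(void)
{
	dev_add_pack(&example_pt);
	return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
	/* dev_remove_pack() waits for in-flight readers via synchronize_net(). */
	dev_remove_pack(&example_pt);
}
module_exit(example_exit);
#endif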
474
475 /******************************************************************************
476
477 Device Boot-time Settings Routines
478
479 *******************************************************************************/
480
481 /* Boot time configuration table */
482 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
483
484 /**
485 * netdev_boot_setup_add - add new setup entry
486 * @name: name of the device
487 * @map: configured settings for the device
488 *
489 * Adds new setup entry to the dev_boot_setup list. The function
490 * returns 0 on error and 1 on success. This is a generic routine for
491 * all netdevices.
492 */
493 static int netdev_boot_setup_add(char *name, struct ifmap *map)
494 {
495 struct netdev_boot_setup *s;
496 int i;
497
498 s = dev_boot_setup;
499 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
500 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
501 memset(s[i].name, 0, sizeof(s[i].name));
502 strlcpy(s[i].name, name, IFNAMSIZ);
503 memcpy(&s[i].map, map, sizeof(s[i].map));
504 break;
505 }
506 }
507
508 return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
509 }
510
511 /**
512 * netdev_boot_setup_check - check boot time settings
513 * @dev: the netdevice
514 *
515 * Check boot time settings for the device.
516 * Any settings found are applied to the device so that they can be
517 * used later during device probing.
518 * Returns 0 if no settings were found, 1 if they were.
519 */
520 int netdev_boot_setup_check(struct net_device *dev)
521 {
522 struct netdev_boot_setup *s = dev_boot_setup;
523 int i;
524
525 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
526 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
527 !strcmp(dev->name, s[i].name)) {
528 dev->irq = s[i].map.irq;
529 dev->base_addr = s[i].map.base_addr;
530 dev->mem_start = s[i].map.mem_start;
531 dev->mem_end = s[i].map.mem_end;
532 return 1;
533 }
534 }
535 return 0;
536 }
537 EXPORT_SYMBOL(netdev_boot_setup_check);
538
539
540 /**
541 * netdev_boot_base - get address from boot time settings
542 * @prefix: prefix for network device
543 * @unit: id for network device
544 *
545 * Check boot time settings for the base address of the device.
546 * Returns the configured base address, 1 if the device is already
547 * registered (so it should not be probed), or 0 if no settings
548 * were found.
549 */
550 unsigned long netdev_boot_base(const char *prefix, int unit)
551 {
552 const struct netdev_boot_setup *s = dev_boot_setup;
553 char name[IFNAMSIZ];
554 int i;
555
556 sprintf(name, "%s%d", prefix, unit);
557
558 /*
559 * If device already registered then return base of 1
560 * to indicate not to probe for this interface
561 */
562 if (__dev_get_by_name(&init_net, name))
563 return 1;
564
565 for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
566 if (!strcmp(name, s[i].name))
567 return s[i].map.base_addr;
568 return 0;
569 }
570
571 /*
572 * Saves at boot time configured settings for any netdevice.
573 */
574 int __init netdev_boot_setup(char *str)
575 {
576 int ints[5];
577 struct ifmap map;
578
579 str = get_options(str, ARRAY_SIZE(ints), ints);
580 if (!str || !*str)
581 return 0;
582
583 /* Save settings */
584 memset(&map, 0, sizeof(map));
585 if (ints[0] > 0)
586 map.irq = ints[1];
587 if (ints[0] > 1)
588 map.base_addr = ints[2];
589 if (ints[0] > 2)
590 map.mem_start = ints[3];
591 if (ints[0] > 3)
592 map.mem_end = ints[4];
593
594 /* Add new entry to the list */
595 return netdev_boot_setup_add(str, &map);
596 }
597
598 __setup("netdev=", netdev_boot_setup);
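/*
 * Example (editor's note, not from the original source): the "netdev="
 * boot parameter handled above takes up to four comma-separated integers
 * followed by the interface name, in the order irq, base_addr, mem_start,
 * mem_end, e.g.
 *
 *	netdev=5,0x340,0,0,eth1
 *
 * which records IRQ 5 and I/O base 0x340 for eth1; the values are later
 * applied by netdev_boot_setup_check() when the driver probes the device.
 */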
599
600 /*******************************************************************************
601
602 Device Interface Subroutines
603
604 *******************************************************************************/
605
606 /**
607 * __dev_get_by_name - find a device by its name
608 * @net: the applicable net namespace
609 * @name: name to find
610 *
611 * Find an interface by name. Must be called under RTNL semaphore
612 * or @dev_base_lock. If the name is found a pointer to the device
613 * is returned. If the name is not found then %NULL is returned. The
614 * reference counters are not incremented so the caller must be
615 * careful with locks.
616 */
617
618 struct net_device *__dev_get_by_name(struct net *net, const char *name)
619 {
620 struct hlist_node *p;
621 struct net_device *dev;
622 struct hlist_head *head = dev_name_hash(net, name);
623
624 hlist_for_each_entry(dev, p, head, name_hlist)
625 if (!strncmp(dev->name, name, IFNAMSIZ))
626 return dev;
627
628 return NULL;
629 }
630 EXPORT_SYMBOL(__dev_get_by_name);
631
632 /**
633 * dev_get_by_name_rcu - find a device by its name
634 * @net: the applicable net namespace
635 * @name: name to find
636 *
637 * Find an interface by name.
638 * If the name is found a pointer to the device is returned.
639 * If the name is not found then %NULL is returned.
640 * The reference counters are not incremented so the caller must be
641 * careful with locks. The caller must hold RCU lock.
642 */
643
644 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
645 {
646 struct hlist_node *p;
647 struct net_device *dev;
648 struct hlist_head *head = dev_name_hash(net, name);
649
650 hlist_for_each_entry_rcu(dev, p, head, name_hlist)
651 if (!strncmp(dev->name, name, IFNAMSIZ))
652 return dev;
653
654 return NULL;
655 }
656 EXPORT_SYMBOL(dev_get_by_name_rcu);
657
658 /**
659 * dev_get_by_name - find a device by its name
660 * @net: the applicable net namespace
661 * @name: name to find
662 *
663 * Find an interface by name. This can be called from any
664 * context and does its own locking. The returned handle has
665 * the usage count incremented and the caller must use dev_put() to
666 * release it when it is no longer needed. %NULL is returned if no
667 * matching device is found.
668 */
669
670 struct net_device *dev_get_by_name(struct net *net, const char *name)
671 {
672 struct net_device *dev;
673
674 rcu_read_lock();
675 dev = dev_get_by_name_rcu(net, name);
676 if (dev)
677 dev_hold(dev);
678 rcu_read_unlock();
679 return dev;
680 }
681 EXPORT_SYMBOL(dev_get_by_name);
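/*
 * Example usage (editor's illustrative sketch, guarded out): the two lookup
 * flavours above differ only in how the device is pinned. The interface
 * name and function name are illustrative.
 */
#if 0
static void example_lookups(struct net *net)
{
	struct net_device *dev;

	/* Long-lived reference: must be balanced with dev_put(). */
	dev = dev_get_by_name(net, "eth0");
	if (dev) {
		pr_info("%s has ifindex %d\n", dev->name, dev->ifindex);
		dev_put(dev);
	}

	/* Short-lived access: pointer is only valid inside the RCU section. */
	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, "eth0");
	if (dev)
		pr_info("%s is %s\n", dev->name,
			netif_running(dev) ? "running" : "down");
	rcu_read_unlock();
}
#endif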
682
683 /**
684 * __dev_get_by_index - find a device by its ifindex
685 * @net: the applicable net namespace
686 * @ifindex: index of device
687 *
688 * Search for an interface by index. Returns %NULL if the device
689 * is not found or a pointer to the device. The device has not
690 * had its reference counter increased so the caller must be careful
691 * about locking. The caller must hold either the RTNL semaphore
692 * or @dev_base_lock.
693 */
694
695 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
696 {
697 struct hlist_node *p;
698 struct net_device *dev;
699 struct hlist_head *head = dev_index_hash(net, ifindex);
700
701 hlist_for_each_entry(dev, p, head, index_hlist)
702 if (dev->ifindex == ifindex)
703 return dev;
704
705 return NULL;
706 }
707 EXPORT_SYMBOL(__dev_get_by_index);
708
709 /**
710 * dev_get_by_index_rcu - find a device by its ifindex
711 * @net: the applicable net namespace
712 * @ifindex: index of device
713 *
714 * Search for an interface by index. Returns %NULL if the device
715 * is not found or a pointer to the device. The device has not
716 * had its reference counter increased so the caller must be careful
717 * about locking. The caller must hold RCU lock.
718 */
719
720 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
721 {
722 struct hlist_node *p;
723 struct net_device *dev;
724 struct hlist_head *head = dev_index_hash(net, ifindex);
725
726 hlist_for_each_entry_rcu(dev, p, head, index_hlist)
727 if (dev->ifindex == ifindex)
728 return dev;
729
730 return NULL;
731 }
732 EXPORT_SYMBOL(dev_get_by_index_rcu);
733
734
735 /**
736 * dev_get_by_index - find a device by its ifindex
737 * @net: the applicable net namespace
738 * @ifindex: index of device
739 *
740 * Search for an interface by index. Returns NULL if the device
741 * is not found or a pointer to the device. The device returned has
742 * had a reference added and the pointer is safe until the user calls
743 * dev_put to indicate they have finished with it.
744 */
745
746 struct net_device *dev_get_by_index(struct net *net, int ifindex)
747 {
748 struct net_device *dev;
749
750 rcu_read_lock();
751 dev = dev_get_by_index_rcu(net, ifindex);
752 if (dev)
753 dev_hold(dev);
754 rcu_read_unlock();
755 return dev;
756 }
757 EXPORT_SYMBOL(dev_get_by_index);
758
759 /**
760 * dev_getbyhwaddr_rcu - find a device by its hardware address
761 * @net: the applicable net namespace
762 * @type: media type of device
763 * @ha: hardware address
764 *
765 * Search for an interface by MAC address. Returns NULL if the device
766 * is not found or a pointer to the device.
767 * The caller must hold RCU or RTNL.
768 * The returned device has not had its ref count increased
769 * and the caller must therefore be careful about locking
770 *
771 */
772
773 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
774 const char *ha)
775 {
776 struct net_device *dev;
777
778 for_each_netdev_rcu(net, dev)
779 if (dev->type == type &&
780 !memcmp(dev->dev_addr, ha, dev->addr_len))
781 return dev;
782
783 return NULL;
784 }
785 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
786
787 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
788 {
789 struct net_device *dev;
790
791 ASSERT_RTNL();
792 for_each_netdev(net, dev)
793 if (dev->type == type)
794 return dev;
795
796 return NULL;
797 }
798 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
799
800 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
801 {
802 struct net_device *dev, *ret = NULL;
803
804 rcu_read_lock();
805 for_each_netdev_rcu(net, dev)
806 if (dev->type == type) {
807 dev_hold(dev);
808 ret = dev;
809 break;
810 }
811 rcu_read_unlock();
812 return ret;
813 }
814 EXPORT_SYMBOL(dev_getfirstbyhwtype);
815
816 /**
817 * dev_get_by_flags_rcu - find any device with given flags
818 * @net: the applicable net namespace
819 * @if_flags: IFF_* values
820 * @mask: bitmask of bits in if_flags to check
821 *
822 * Search for any interface with the given flags. Returns NULL if a device
823 * is not found or a pointer to the device. Must be called inside
824 * rcu_read_lock(), and result refcount is unchanged.
825 */
826
827 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short if_flags,
828 unsigned short mask)
829 {
830 struct net_device *dev, *ret;
831
832 ret = NULL;
833 for_each_netdev_rcu(net, dev) {
834 if (((dev->flags ^ if_flags) & mask) == 0) {
835 ret = dev;
836 break;
837 }
838 }
839 return ret;
840 }
841 EXPORT_SYMBOL(dev_get_by_flags_rcu);
842
843 /**
844 * dev_valid_name - check if name is okay for network device
845 * @name: name string
846 *
847 * Network device names need to be valid file names
848 * to allow sysfs to work. We also disallow any kind of
849 * whitespace.
850 */
851 bool dev_valid_name(const char *name)
852 {
853 if (*name == '\0')
854 return false;
855 if (strlen(name) >= IFNAMSIZ)
856 return false;
857 if (!strcmp(name, ".") || !strcmp(name, ".."))
858 return false;
859
860 while (*name) {
861 if (*name == '/' || isspace(*name))
862 return false;
863 name++;
864 }
865 return true;
866 }
867 EXPORT_SYMBOL(dev_valid_name);
868
869 /**
870 * __dev_alloc_name - allocate a name for a device
871 * @net: network namespace to allocate the device name in
872 * @name: name format string
873 * @buf: scratch buffer and result name string
874 *
875 * Passed a format string - eg "lt%d" - it will try to find a suitable
876 * id. It scans list of devices to build up a free map, then chooses
877 * the first empty slot. The caller must hold the dev_base or rtnl lock
878 * while allocating the name and adding the device in order to avoid
879 * duplicates.
880 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
881 * Returns the number of the unit assigned or a negative errno code.
882 */
883
884 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
885 {
886 int i = 0;
887 const char *p;
888 const int max_netdevices = 8*PAGE_SIZE;
889 unsigned long *inuse;
890 struct net_device *d;
891
892 p = strnchr(name, IFNAMSIZ-1, '%');
893 if (p) {
894 /*
895 * Verify the string as this thing may have come from
896 * the user. There must be either one "%d" and no other "%"
897 * characters.
898 */
899 if (p[1] != 'd' || strchr(p + 2, '%'))
900 return -EINVAL;
901
902 /* Use one page as a bit array of possible slots */
903 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
904 if (!inuse)
905 return -ENOMEM;
906
907 for_each_netdev(net, d) {
908 if (!sscanf(d->name, name, &i))
909 continue;
910 if (i < 0 || i >= max_netdevices)
911 continue;
912
913 /* avoid cases where sscanf is not exact inverse of printf */
914 snprintf(buf, IFNAMSIZ, name, i);
915 if (!strncmp(buf, d->name, IFNAMSIZ))
916 set_bit(i, inuse);
917 }
918
919 i = find_first_zero_bit(inuse, max_netdevices);
920 free_page((unsigned long) inuse);
921 }
922
923 if (buf != name)
924 snprintf(buf, IFNAMSIZ, name, i);
925 if (!__dev_get_by_name(net, buf))
926 return i;
927
928 /* It is possible to run out of possible slots
929 * when the name is long and there isn't enough space left
930 * for the digits, or if all bits are used.
931 */
932 return -ENFILE;
933 }
934
935 /**
936 * dev_alloc_name - allocate a name for a device
937 * @dev: device
938 * @name: name format string
939 *
940 * Passed a format string - eg "lt%d" - it will try to find a suitable
941 * id. It scans list of devices to build up a free map, then chooses
942 * the first empty slot. The caller must hold the dev_base or rtnl lock
943 * while allocating the name and adding the device in order to avoid
944 * duplicates.
945 * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
946 * Returns the number of the unit assigned or a negative errno code.
947 */
948
949 int dev_alloc_name(struct net_device *dev, const char *name)
950 {
951 char buf[IFNAMSIZ];
952 struct net *net;
953 int ret;
954
955 BUG_ON(!dev_net(dev));
956 net = dev_net(dev);
957 ret = __dev_alloc_name(net, name, buf);
958 if (ret >= 0)
959 strlcpy(dev->name, buf, IFNAMSIZ);
960 return ret;
961 }
962 EXPORT_SYMBOL(dev_alloc_name);
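/*
 * Example usage (editor's illustrative sketch, guarded out): a driver lets
 * dev_alloc_name() pick the unit number by passing a "%d" template before
 * registering the device; the "dummy%d" template is illustrative.
 */
#if 0
static int example_pick_name(struct net_device *dev)
{
	int unit;

	ASSERT_RTNL();
	unit = dev_alloc_name(dev, "dummy%d");	/* fills dev->name, e.g. "dummy0" */
	if (unit < 0)
		return unit;			/* -EINVAL, -ENFILE or -ENOMEM */
	return 0;
}
#endif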
963
964 static int dev_get_valid_name(struct net_device *dev, const char *name)
965 {
966 struct net *net;
967
968 BUG_ON(!dev_net(dev));
969 net = dev_net(dev);
970
971 if (!dev_valid_name(name))
972 return -EINVAL;
973
974 if (strchr(name, '%'))
975 return dev_alloc_name(dev, name);
976 else if (__dev_get_by_name(net, name))
977 return -EEXIST;
978 else if (dev->name != name)
979 strlcpy(dev->name, name, IFNAMSIZ);
980
981 return 0;
982 }
983
984 /**
985 * dev_change_name - change name of a device
986 * @dev: device
987 * @newname: name (or format string) must be at least IFNAMSIZ
988 *
989 * Change the name of a device. A format string such as "eth%d"
990 * can be passed for wildcarding.
991 */
992 int dev_change_name(struct net_device *dev, const char *newname)
993 {
994 char oldname[IFNAMSIZ];
995 int err = 0;
996 int ret;
997 struct net *net;
998
999 ASSERT_RTNL();
1000 BUG_ON(!dev_net(dev));
1001
1002 net = dev_net(dev);
1003 if (dev->flags & IFF_UP)
1004 return -EBUSY;
1005
1006 if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
1007 return 0;
1008
1009 memcpy(oldname, dev->name, IFNAMSIZ);
1010
1011 err = dev_get_valid_name(dev, newname);
1012 if (err < 0)
1013 return err;
1014
1015 rollback:
1016 ret = device_rename(&dev->dev, dev->name);
1017 if (ret) {
1018 memcpy(dev->name, oldname, IFNAMSIZ);
1019 return ret;
1020 }
1021
1022 write_lock_bh(&dev_base_lock);
1023 hlist_del_rcu(&dev->name_hlist);
1024 write_unlock_bh(&dev_base_lock);
1025
1026 synchronize_rcu();
1027
1028 write_lock_bh(&dev_base_lock);
1029 hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1030 write_unlock_bh(&dev_base_lock);
1031
1032 ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1033 ret = notifier_to_errno(ret);
1034
1035 if (ret) {
1036 /* err >= 0 after dev_alloc_name() or stores the first errno */
1037 if (err >= 0) {
1038 err = ret;
1039 memcpy(dev->name, oldname, IFNAMSIZ);
1040 goto rollback;
1041 } else {
1042 pr_err("%s: name change rollback failed: %d\n",
1043 dev->name, ret);
1044 }
1045 }
1046
1047 return err;
1048 }
1049
1050 /**
1051 * dev_set_alias - change ifalias of a device
1052 * @dev: device
1053 * @alias: name up to IFALIASZ
1054 * @len: limit of bytes to copy from info
1055 *
1056 * Set the ifalias for a device.
1057 */
1058 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1059 {
1060 ASSERT_RTNL();
1061
1062 if (len >= IFALIASZ)
1063 return -EINVAL;
1064
1065 if (!len) {
1066 if (dev->ifalias) {
1067 kfree(dev->ifalias);
1068 dev->ifalias = NULL;
1069 }
1070 return 0;
1071 }
1072
1073 dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1074 if (!dev->ifalias)
1075 return -ENOMEM;
1076
1077 strlcpy(dev->ifalias, alias, len+1);
1078 return len;
1079 }
1080
1081
1082 /**
1083 * netdev_features_change - device changes features
1084 * @dev: device to cause notification
1085 *
1086 * Called to indicate a device has changed features.
1087 */
1088 void netdev_features_change(struct net_device *dev)
1089 {
1090 call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1091 }
1092 EXPORT_SYMBOL(netdev_features_change);
1093
1094 /**
1095 * netdev_state_change - device changes state
1096 * @dev: device to cause notification
1097 *
1098 * Called to indicate a device has changed state. This function calls
1099 * the notifier chains for netdev_chain and sends a NEWLINK message
1100 * to the routing socket.
1101 */
1102 void netdev_state_change(struct net_device *dev)
1103 {
1104 if (dev->flags & IFF_UP) {
1105 call_netdevice_notifiers(NETDEV_CHANGE, dev);
1106 rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
1107 }
1108 }
1109 EXPORT_SYMBOL(netdev_state_change);
1110
1111 int netdev_bonding_change(struct net_device *dev, unsigned long event)
1112 {
1113 return call_netdevice_notifiers(event, dev);
1114 }
1115 EXPORT_SYMBOL(netdev_bonding_change);
1116
1117 /**
1118 * dev_load - load a network module
1119 * @net: the applicable net namespace
1120 * @name: name of interface
1121 *
1122 * If a network interface is not present and the process has suitable
1123 * privileges this function loads the module. If module loading is not
1124 * available in this kernel then it becomes a nop.
1125 */
1126
1127 void dev_load(struct net *net, const char *name)
1128 {
1129 struct net_device *dev;
1130 int no_module;
1131
1132 rcu_read_lock();
1133 dev = dev_get_by_name_rcu(net, name);
1134 rcu_read_unlock();
1135
1136 no_module = !dev;
1137 if (no_module && capable(CAP_NET_ADMIN))
1138 no_module = request_module("netdev-%s", name);
1139 if (no_module && capable(CAP_SYS_MODULE)) {
1140 if (!request_module("%s", name))
1141 pr_err("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
1142 name);
1143 }
1144 }
1145 EXPORT_SYMBOL(dev_load);
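/*
 * Example (editor's illustrative sketch, guarded out): for the CAP_NET_ADMIN
 * path above to succeed, the module that provides a fixed interface name
 * declares the matching "netdev-<name>" alias, conventionally with
 * MODULE_ALIAS_NETDEV(). The device name is hypothetical.
 */
#if 0
	/* In the driver that creates the "mydev0" interface: */
	MODULE_ALIAS_NETDEV("mydev0");	/* expands to MODULE_ALIAS("netdev-mydev0") */
#endif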
1146
1147 static int __dev_open(struct net_device *dev)
1148 {
1149 const struct net_device_ops *ops = dev->netdev_ops;
1150 int ret;
1151
1152 ASSERT_RTNL();
1153
1154 if (!netif_device_present(dev))
1155 return -ENODEV;
1156
1157 ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1158 ret = notifier_to_errno(ret);
1159 if (ret)
1160 return ret;
1161
1162 set_bit(__LINK_STATE_START, &dev->state);
1163
1164 if (ops->ndo_validate_addr)
1165 ret = ops->ndo_validate_addr(dev);
1166
1167 if (!ret && ops->ndo_open)
1168 ret = ops->ndo_open(dev);
1169
1170 if (ret)
1171 clear_bit(__LINK_STATE_START, &dev->state);
1172 else {
1173 dev->flags |= IFF_UP;
1174 net_dmaengine_get();
1175 dev_set_rx_mode(dev);
1176 dev_activate(dev);
1177 }
1178
1179 return ret;
1180 }
1181
1182 /**
1183 * dev_open - prepare an interface for use.
1184 * @dev: device to open
1185 *
1186 * Takes a device from down to up state. The device's private open
1187 * function is invoked and then the multicast lists are loaded. Finally
1188 * the device is moved into the up state and a %NETDEV_UP message is
1189 * sent to the netdev notifier chain.
1190 *
1191 * Calling this function on an active interface is a nop. On a failure
1192 * a negative errno code is returned.
1193 */
1194 int dev_open(struct net_device *dev)
1195 {
1196 int ret;
1197
1198 if (dev->flags & IFF_UP)
1199 return 0;
1200
1201 ret = __dev_open(dev);
1202 if (ret < 0)
1203 return ret;
1204
1205 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1206 call_netdevice_notifiers(NETDEV_UP, dev);
1207
1208 return ret;
1209 }
1210 EXPORT_SYMBOL(dev_open);
1211
1212 static int __dev_close_many(struct list_head *head)
1213 {
1214 struct net_device *dev;
1215
1216 ASSERT_RTNL();
1217 might_sleep();
1218
1219 list_for_each_entry(dev, head, unreg_list) {
1220 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1221
1222 clear_bit(__LINK_STATE_START, &dev->state);
1223
1224 /* Synchronize to scheduled poll. We cannot touch the poll list; it
1225 * may even be on a different cpu. So just clear netif_running().
1226 *
1227 * dev->stop() will invoke napi_disable() on all of its
1228 * napi_struct instances on this device.
1229 */
1230 smp_mb__after_clear_bit(); /* Commit netif_running(). */
1231 }
1232
1233 dev_deactivate_many(head);
1234
1235 list_for_each_entry(dev, head, unreg_list) {
1236 const struct net_device_ops *ops = dev->netdev_ops;
1237
1238 /*
1239 * Call the device specific close. This cannot fail.
1240 * Only if device is UP
1241 *
1242 * We allow it to be called even after a DETACH hot-plug
1243 * event.
1244 */
1245 if (ops->ndo_stop)
1246 ops->ndo_stop(dev);
1247
1248 dev->flags &= ~IFF_UP;
1249 net_dmaengine_put();
1250 }
1251
1252 return 0;
1253 }
1254
1255 static int __dev_close(struct net_device *dev)
1256 {
1257 int retval;
1258 LIST_HEAD(single);
1259
1260 list_add(&dev->unreg_list, &single);
1261 retval = __dev_close_many(&single);
1262 list_del(&single);
1263 return retval;
1264 }
1265
1266 static int dev_close_many(struct list_head *head)
1267 {
1268 struct net_device *dev, *tmp;
1269 LIST_HEAD(tmp_list);
1270
1271 list_for_each_entry_safe(dev, tmp, head, unreg_list)
1272 if (!(dev->flags & IFF_UP))
1273 list_move(&dev->unreg_list, &tmp_list);
1274
1275 __dev_close_many(head);
1276
1277 list_for_each_entry(dev, head, unreg_list) {
1278 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
1279 call_netdevice_notifiers(NETDEV_DOWN, dev);
1280 }
1281
1282 /* rollback_registered_many needs the complete original list */
1283 list_splice(&tmp_list, head);
1284 return 0;
1285 }
1286
1287 /**
1288 * dev_close - shutdown an interface.
1289 * @dev: device to shutdown
1290 *
1291 * This function moves an active device into down state. A
1292 * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1293 * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1294 * chain.
1295 */
1296 int dev_close(struct net_device *dev)
1297 {
1298 if (dev->flags & IFF_UP) {
1299 LIST_HEAD(single);
1300
1301 list_add(&dev->unreg_list, &single);
1302 dev_close_many(&single);
1303 list_del(&single);
1304 }
1305 return 0;
1306 }
1307 EXPORT_SYMBOL(dev_close);
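/*
 * Example usage (editor's illustrative sketch, guarded out): dev_open() and
 * dev_close() change the administrative state and must run under the RTNL
 * lock, much like "ip link set <dev> up/down" does from user space.
 */
#if 0
static int example_toggle(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_open(dev);		/* nop if the device is already IFF_UP */
	if (!err)
		err = dev_close(dev);	/* always returns 0 */
	rtnl_unlock();
	return err;
}
#endif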
1308
1309
1310 /**
1311 * dev_disable_lro - disable Large Receive Offload on a device
1312 * @dev: device
1313 *
1314 * Disable Large Receive Offload (LRO) on a net device. Must be
1315 * called under RTNL. This is needed if received packets may be
1316 * forwarded to another interface.
1317 */
1318 void dev_disable_lro(struct net_device *dev)
1319 {
1320 /*
1321 * If we're trying to disable lro on a vlan device
1322 * use the underlying physical device instead
1323 */
1324 if (is_vlan_dev(dev))
1325 dev = vlan_dev_real_dev(dev);
1326
1327 dev->wanted_features &= ~NETIF_F_LRO;
1328 netdev_update_features(dev);
1329
1330 if (unlikely(dev->features & NETIF_F_LRO))
1331 netdev_WARN(dev, "failed to disable LRO!\n");
1332 }
1333 EXPORT_SYMBOL(dev_disable_lro);
1334
1335
1336 static int dev_boot_phase = 1;
1337
1338 /**
1339 * register_netdevice_notifier - register a network notifier block
1340 * @nb: notifier
1341 *
1342 * Register a notifier to be called when network device events occur.
1343 * The notifier passed is linked into the kernel structures and must
1344 * not be reused until it has been unregistered. A negative errno code
1345 * is returned on a failure.
1346 *
1347 * When registered, all registration and up events are replayed
1348 * to the new notifier to allow it to have a race-free
1349 * view of the network device list.
1350 */
1351
1352 int register_netdevice_notifier(struct notifier_block *nb)
1353 {
1354 struct net_device *dev;
1355 struct net_device *last;
1356 struct net *net;
1357 int err;
1358
1359 rtnl_lock();
1360 err = raw_notifier_chain_register(&netdev_chain, nb);
1361 if (err)
1362 goto unlock;
1363 if (dev_boot_phase)
1364 goto unlock;
1365 for_each_net(net) {
1366 for_each_netdev(net, dev) {
1367 err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
1368 err = notifier_to_errno(err);
1369 if (err)
1370 goto rollback;
1371
1372 if (!(dev->flags & IFF_UP))
1373 continue;
1374
1375 nb->notifier_call(nb, NETDEV_UP, dev);
1376 }
1377 }
1378
1379 unlock:
1380 rtnl_unlock();
1381 return err;
1382
1383 rollback:
1384 last = dev;
1385 for_each_net(net) {
1386 for_each_netdev(net, dev) {
1387 if (dev == last)
1388 goto outroll;
1389
1390 if (dev->flags & IFF_UP) {
1391 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1392 nb->notifier_call(nb, NETDEV_DOWN, dev);
1393 }
1394 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1395 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1396 }
1397 }
1398
1399 outroll:
1400 raw_notifier_chain_unregister(&netdev_chain, nb);
1401 goto unlock;
1402 }
1403 EXPORT_SYMBOL(register_netdevice_notifier);
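/*
 * Example usage (editor's illustrative sketch, guarded out): a subsystem
 * that wants to react to interfaces coming up registers a notifier_block.
 * Note that in this kernel the notifier data is the net_device pointer
 * itself. All names are illustrative.
 */
#if 0
static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	if (event == NETDEV_UP)
		pr_info("%s is up\n", dev->name);
	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_nb = {
	.notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_nb) also replays REGISTER/UP
 * for already-existing devices; unregister_netdevice_notifier() undoes it
 * and synthesizes DOWN/UNREGISTER events.
 */
#endif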
1404
1405 /**
1406 * unregister_netdevice_notifier - unregister a network notifier block
1407 * @nb: notifier
1408 *
1409 * Unregister a notifier previously registered by
1410 * register_netdevice_notifier(). The notifier is unlinked from the
1411 * kernel structures and may then be reused. A negative errno code
1412 * is returned on a failure.
1413 *
1414 * After unregistering, unregister and down device events are synthesized
1415 * for all devices on the device list and sent to the removed notifier,
1416 * removing the need for special case cleanup code.
1417 */
1418
1419 int unregister_netdevice_notifier(struct notifier_block *nb)
1420 {
1421 struct net_device *dev;
1422 struct net *net;
1423 int err;
1424
1425 rtnl_lock();
1426 err = raw_notifier_chain_unregister(&netdev_chain, nb);
1427 if (err)
1428 goto unlock;
1429
1430 for_each_net(net) {
1431 for_each_netdev(net, dev) {
1432 if (dev->flags & IFF_UP) {
1433 nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
1434 nb->notifier_call(nb, NETDEV_DOWN, dev);
1435 }
1436 nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
1437 nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
1438 }
1439 }
1440 unlock:
1441 rtnl_unlock();
1442 return err;
1443 }
1444 EXPORT_SYMBOL(unregister_netdevice_notifier);
1445
1446 /**
1447 * call_netdevice_notifiers - call all network notifier blocks
1448 * @val: value passed unmodified to notifier function
1449 * @dev: net_device pointer passed unmodified to notifier function
1450 *
1451 * Call all network notifier blocks. Parameters and return value
1452 * are as for raw_notifier_call_chain().
1453 */
1454
1455 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1456 {
1457 ASSERT_RTNL();
1458 return raw_notifier_call_chain(&netdev_chain, val, dev);
1459 }
1460 EXPORT_SYMBOL(call_netdevice_notifiers);
1461
1462 static struct static_key netstamp_needed __read_mostly;
1463 #ifdef HAVE_JUMP_LABEL
1464 /* We are not allowed to call static_key_slow_dec() from irq context
1465 * If net_disable_timestamp() is called from irq context, defer the
1466 * static_key_slow_dec() calls.
1467 */
1468 static atomic_t netstamp_needed_deferred;
1469 #endif
1470
1471 void net_enable_timestamp(void)
1472 {
1473 #ifdef HAVE_JUMP_LABEL
1474 int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1475
1476 if (deferred) {
1477 while (--deferred)
1478 static_key_slow_dec(&netstamp_needed);
1479 return;
1480 }
1481 #endif
1482 WARN_ON(in_interrupt());
1483 static_key_slow_inc(&netstamp_needed);
1484 }
1485 EXPORT_SYMBOL(net_enable_timestamp);
1486
1487 void net_disable_timestamp(void)
1488 {
1489 #ifdef HAVE_JUMP_LABEL
1490 if (in_interrupt()) {
1491 atomic_inc(&netstamp_needed_deferred);
1492 return;
1493 }
1494 #endif
1495 static_key_slow_dec(&netstamp_needed);
1496 }
1497 EXPORT_SYMBOL(net_disable_timestamp);
1498
1499 static inline void net_timestamp_set(struct sk_buff *skb)
1500 {
1501 skb->tstamp.tv64 = 0;
1502 if (static_key_false(&netstamp_needed))
1503 __net_timestamp(skb);
1504 }
1505
1506 #define net_timestamp_check(COND, SKB) \
1507 if (static_key_false(&netstamp_needed)) { \
1508 if ((COND) && !(SKB)->tstamp.tv64) \
1509 __net_timestamp(SKB); \
1510 } \
1511
1512 static int net_hwtstamp_validate(struct ifreq *ifr)
1513 {
1514 struct hwtstamp_config cfg;
1515 enum hwtstamp_tx_types tx_type;
1516 enum hwtstamp_rx_filters rx_filter;
1517 int tx_type_valid = 0;
1518 int rx_filter_valid = 0;
1519
1520 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
1521 return -EFAULT;
1522
1523 if (cfg.flags) /* reserved for future extensions */
1524 return -EINVAL;
1525
1526 tx_type = cfg.tx_type;
1527 rx_filter = cfg.rx_filter;
1528
1529 switch (tx_type) {
1530 case HWTSTAMP_TX_OFF:
1531 case HWTSTAMP_TX_ON:
1532 case HWTSTAMP_TX_ONESTEP_SYNC:
1533 tx_type_valid = 1;
1534 break;
1535 }
1536
1537 switch (rx_filter) {
1538 case HWTSTAMP_FILTER_NONE:
1539 case HWTSTAMP_FILTER_ALL:
1540 case HWTSTAMP_FILTER_SOME:
1541 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1542 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1543 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1544 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1545 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1546 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1547 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1548 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1549 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1550 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1551 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1552 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1553 rx_filter_valid = 1;
1554 break;
1555 }
1556
1557 if (!tx_type_valid || !rx_filter_valid)
1558 return -ERANGE;
1559
1560 return 0;
1561 }
1562
1563 static inline bool is_skb_forwardable(struct net_device *dev,
1564 struct sk_buff *skb)
1565 {
1566 unsigned int len;
1567
1568 if (!(dev->flags & IFF_UP))
1569 return false;
1570
1571 len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1572 if (skb->len <= len)
1573 return true;
1574
1575 /* if TSO is enabled, we don't care about the length, as the packet
1576 * could be forwarded without having been segmented first
1577 */
1578 if (skb_is_gso(skb))
1579 return true;
1580
1581 return false;
1582 }
1583
1584 /**
1585 * dev_forward_skb - loopback an skb to another netif
1586 *
1587 * @dev: destination network device
1588 * @skb: buffer to forward
1589 *
1590 * return values:
1591 * NET_RX_SUCCESS (no congestion)
1592 * NET_RX_DROP (packet was dropped, but freed)
1593 *
1594 * dev_forward_skb can be used for injecting an skb from the
1595 * start_xmit function of one device into the receive queue
1596 * of another device.
1597 *
1598 * The receiving device may be in another namespace, so
1599 * we have to clear all information in the skb that could
1600 * impact namespace isolation.
1601 */
1602 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1603 {
1604 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
1605 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
1606 atomic_long_inc(&dev->rx_dropped);
1607 kfree_skb(skb);
1608 return NET_RX_DROP;
1609 }
1610 }
1611
1612 skb_orphan(skb);
1613 nf_reset(skb);
1614
1615 if (unlikely(!is_skb_forwardable(dev, skb))) {
1616 atomic_long_inc(&dev->rx_dropped);
1617 kfree_skb(skb);
1618 return NET_RX_DROP;
1619 }
1620 skb->skb_iif = 0;
1621 skb_set_dev(skb, dev);
1622 skb->tstamp.tv64 = 0;
1623 skb->pkt_type = PACKET_HOST;
1624 skb->protocol = eth_type_trans(skb, dev);
1625 return netif_rx(skb);
1626 }
1627 EXPORT_SYMBOL_GPL(dev_forward_skb);
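/*
 * Example usage (editor's illustrative sketch, guarded out): a veth-style
 * driver's start_xmit hands each skb to its peer with dev_forward_skb().
 * example_get_peer() is a hypothetical helper.
 */
#if 0
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *peer = example_get_peer(dev);

	if (dev_forward_skb(peer, skb) == NET_RX_SUCCESS)
		dev->stats.tx_packets++;
	else
		dev->stats.tx_dropped++;	/* skb was freed for us */
	return NETDEV_TX_OK;
}
#endif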
1628
1629 static inline int deliver_skb(struct sk_buff *skb,
1630 struct packet_type *pt_prev,
1631 struct net_device *orig_dev)
1632 {
1633 atomic_inc(&skb->users);
1634 return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1635 }
1636
1637 /*
1638 * Support routine. Sends outgoing frames to any network
1639 * taps currently in use.
1640 */
1641
1642 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1643 {
1644 struct packet_type *ptype;
1645 struct sk_buff *skb2 = NULL;
1646 struct packet_type *pt_prev = NULL;
1647
1648 rcu_read_lock();
1649 list_for_each_entry_rcu(ptype, &ptype_all, list) {
1650 /* Never send packets back to the socket
1651 * they originated from - MvS (miquels@drinkel.ow.org)
1652 */
1653 if ((ptype->dev == dev || !ptype->dev) &&
1654 (ptype->af_packet_priv == NULL ||
1655 (struct sock *)ptype->af_packet_priv != skb->sk)) {
1656 if (pt_prev) {
1657 deliver_skb(skb2, pt_prev, skb->dev);
1658 pt_prev = ptype;
1659 continue;
1660 }
1661
1662 skb2 = skb_clone(skb, GFP_ATOMIC);
1663 if (!skb2)
1664 break;
1665
1666 net_timestamp_set(skb2);
1667
1668 /* skb->nh should be correctly
1669 * set by sender, so that the second statement is
1670 * just protection against buggy protocols.
1671 */
1672 skb_reset_mac_header(skb2);
1673
1674 if (skb_network_header(skb2) < skb2->data ||
1675 skb2->network_header > skb2->tail) {
1676 net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1677 ntohs(skb2->protocol),
1678 dev->name);
1679 skb_reset_network_header(skb2);
1680 }
1681
1682 skb2->transport_header = skb2->network_header;
1683 skb2->pkt_type = PACKET_OUTGOING;
1684 pt_prev = ptype;
1685 }
1686 }
1687 if (pt_prev)
1688 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1689 rcu_read_unlock();
1690 }
1691
1692 /* netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1693 * @dev: Network device
1694 * @txq: number of queues available
1695 *
1696 * If real_num_tx_queues is changed the tc mappings may no longer be
1697 * valid. To resolve this, verify that each tc mapping remains valid and,
1698 * if not, zero the mapping. With no priorities mapping to this
1699 * offset/count pair it will no longer be used. In the worst case, if TC0
1700 * is invalid, nothing can be done, so priority mappings are disabled. It is
1701 * expected that drivers will fix this mapping if they can before
1702 * calling netif_set_real_num_tx_queues.
1703 */
1704 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1705 {
1706 int i;
1707 struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1708
1709 /* If TC0 is invalidated disable TC mapping */
1710 if (tc->offset + tc->count > txq) {
1711 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1712 dev->num_tc = 0;
1713 return;
1714 }
1715
1716 /* Invalidated prio to tc mappings set to TC0 */
1717 for (i = 1; i < TC_BITMASK + 1; i++) {
1718 int q = netdev_get_prio_tc_map(dev, i);
1719
1720 tc = &dev->tc_to_txq[q];
1721 if (tc->offset + tc->count > txq) {
1722 pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1723 i, q);
1724 netdev_set_prio_tc_map(dev, i, 0);
1725 }
1726 }
1727 }
1728
1729 /*
1730 * Routine to help set real_num_tx_queues. To avoid skbs being mapped to queues
1731 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1732 */
1733 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1734 {
1735 int rc;
1736
1737 if (txq < 1 || txq > dev->num_tx_queues)
1738 return -EINVAL;
1739
1740 if (dev->reg_state == NETREG_REGISTERED ||
1741 dev->reg_state == NETREG_UNREGISTERING) {
1742 ASSERT_RTNL();
1743
1744 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
1745 txq);
1746 if (rc)
1747 return rc;
1748
1749 if (dev->num_tc)
1750 netif_setup_tc(dev, txq);
1751
1752 if (txq < dev->real_num_tx_queues)
1753 qdisc_reset_all_tx_gt(dev, txq);
1754 }
1755
1756 dev->real_num_tx_queues = txq;
1757 return 0;
1758 }
1759 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1760
1761 #ifdef CONFIG_RPS
1762 /**
1763 * netif_set_real_num_rx_queues - set actual number of RX queues used
1764 * @dev: Network device
1765 * @rxq: Actual number of RX queues
1766 *
1767 * This must be called either with the rtnl_lock held or before
1768 * registration of the net device. Returns 0 on success, or a
1769 * negative error code. If called before registration, it always
1770 * succeeds.
1771 */
1772 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
1773 {
1774 int rc;
1775
1776 if (rxq < 1 || rxq > dev->num_rx_queues)
1777 return -EINVAL;
1778
1779 if (dev->reg_state == NETREG_REGISTERED) {
1780 ASSERT_RTNL();
1781
1782 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
1783 rxq);
1784 if (rc)
1785 return rc;
1786 }
1787
1788 dev->real_num_rx_queues = rxq;
1789 return 0;
1790 }
1791 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
1792 #endif
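/*
 * Example usage (editor's illustrative sketch, guarded out): a multiqueue
 * driver that ends up with fewer usable channels than it advertised trims
 * both queue counts under RTNL. netif_set_real_num_rx_queues() is a no-op
 * stub when CONFIG_RPS is disabled. Names are illustrative.
 */
#if 0
static int example_set_channels(struct net_device *dev, unsigned int count)
{
	int err;

	ASSERT_RTNL();
	err = netif_set_real_num_tx_queues(dev, count);
	if (err)
		return err;
	return netif_set_real_num_rx_queues(dev, count);
}
#endif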
1793
1794 static inline void __netif_reschedule(struct Qdisc *q)
1795 {
1796 struct softnet_data *sd;
1797 unsigned long flags;
1798
1799 local_irq_save(flags);
1800 sd = &__get_cpu_var(softnet_data);
1801 q->next_sched = NULL;
1802 *sd->output_queue_tailp = q;
1803 sd->output_queue_tailp = &q->next_sched;
1804 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1805 local_irq_restore(flags);
1806 }
1807
1808 void __netif_schedule(struct Qdisc *q)
1809 {
1810 if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
1811 __netif_reschedule(q);
1812 }
1813 EXPORT_SYMBOL(__netif_schedule);
1814
1815 void dev_kfree_skb_irq(struct sk_buff *skb)
1816 {
1817 if (atomic_dec_and_test(&skb->users)) {
1818 struct softnet_data *sd;
1819 unsigned long flags;
1820
1821 local_irq_save(flags);
1822 sd = &__get_cpu_var(softnet_data);
1823 skb->next = sd->completion_queue;
1824 sd->completion_queue = skb;
1825 raise_softirq_irqoff(NET_TX_SOFTIRQ);
1826 local_irq_restore(flags);
1827 }
1828 }
1829 EXPORT_SYMBOL(dev_kfree_skb_irq);
1830
1831 void dev_kfree_skb_any(struct sk_buff *skb)
1832 {
1833 if (in_irq() || irqs_disabled())
1834 dev_kfree_skb_irq(skb);
1835 else
1836 dev_kfree_skb(skb);
1837 }
1838 EXPORT_SYMBOL(dev_kfree_skb_any);
1839
1840
1841 /**
1842 * netif_device_detach - mark device as removed
1843 * @dev: network device
1844 *
1845 * Mark device as removed from system and therefore no longer available.
1846 */
1847 void netif_device_detach(struct net_device *dev)
1848 {
1849 if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
1850 netif_running(dev)) {
1851 netif_tx_stop_all_queues(dev);
1852 }
1853 }
1854 EXPORT_SYMBOL(netif_device_detach);
1855
1856 /**
1857 * netif_device_attach - mark device as attached
1858 * @dev: network device
1859 *
1860 * Mark device as attached to the system and restart it if needed.
1861 */
1862 void netif_device_attach(struct net_device *dev)
1863 {
1864 if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
1865 netif_running(dev)) {
1866 netif_tx_wake_all_queues(dev);
1867 __netdev_watchdog_up(dev);
1868 }
1869 }
1870 EXPORT_SYMBOL(netif_device_attach);
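/*
 * Example usage (editor's illustrative sketch, guarded out): a PCI driver
 * pairs the detach/attach helpers above with its suspend/resume callbacks
 * so the stack stops queueing packets while the hardware is powered down.
 * Names are illustrative and chip-specific handling is elided.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	netif_device_detach(dev);	/* stops all TX queues if running */
	/* ... save chip state, disable the PCI device, etc. ... */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	/* ... re-enable the PCI device and restore chip state ... */
	netif_device_attach(dev);	/* wakes queues and the watchdog */
	return 0;
}
#endif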
1871
1872 /**
1873 * skb_set_dev - assign a new device to a buffer
1874 * @skb: buffer for the new device
1875 * @dev: network device
1876 *
1877 * If an skb is owned by a device already, we have to reset
1878 * all data private to the namespace a device belongs to
1879 * before assigning it a new device.
1880 */
1881 #ifdef CONFIG_NET_NS
1882 void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1883 {
1884 skb_dst_drop(skb);
1885 if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) {
1886 secpath_reset(skb);
1887 nf_reset(skb);
1888 skb_init_secmark(skb);
1889 skb->mark = 0;
1890 skb->priority = 0;
1891 skb->nf_trace = 0;
1892 skb->ipvs_property = 0;
1893 #ifdef CONFIG_NET_SCHED
1894 skb->tc_index = 0;
1895 #endif
1896 }
1897 skb->dev = dev;
1898 }
1899 EXPORT_SYMBOL(skb_set_dev);
1900 #endif /* CONFIG_NET_NS */
1901
1902 static void skb_warn_bad_offload(const struct sk_buff *skb)
1903 {
1904 static const netdev_features_t null_features = 0;
1905 struct net_device *dev = skb->dev;
1906 const char *driver = "";
1907
1908 if (dev && dev->dev.parent)
1909 driver = dev_driver_string(dev->dev.parent);
1910
1911 WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
1912 "gso_type=%d ip_summed=%d\n",
1913 driver, dev ? &dev->features : &null_features,
1914 skb->sk ? &skb->sk->sk_route_caps : &null_features,
1915 skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
1916 skb_shinfo(skb)->gso_type, skb->ip_summed);
1917 }
1918
1919 /*
1920 * Invalidate hardware checksum when packet is to be mangled, and
1921 * complete checksum manually on outgoing path.
1922 */
1923 int skb_checksum_help(struct sk_buff *skb)
1924 {
1925 __wsum csum;
1926 int ret = 0, offset;
1927
1928 if (skb->ip_summed == CHECKSUM_COMPLETE)
1929 goto out_set_summed;
1930
1931 if (unlikely(skb_shinfo(skb)->gso_size)) {
1932 skb_warn_bad_offload(skb);
1933 return -EINVAL;
1934 }
1935
1936 offset = skb_checksum_start_offset(skb);
1937 BUG_ON(offset >= skb_headlen(skb));
1938 csum = skb_checksum(skb, offset, skb->len - offset, 0);
1939
1940 offset += skb->csum_offset;
1941 BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
1942
1943 if (skb_cloned(skb) &&
1944 !skb_clone_writable(skb, offset + sizeof(__sum16))) {
1945 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1946 if (ret)
1947 goto out;
1948 }
1949
1950 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
1951 out_set_summed:
1952 skb->ip_summed = CHECKSUM_NONE;
1953 out:
1954 return ret;
1955 }
1956 EXPORT_SYMBOL(skb_checksum_help);
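/*
 * Illustrative sketch (assumption, not part of this file): a driver whose
 * hardware cannot checksum a particular packet can fall back to
 * skb_checksum_help() in its ndo_start_xmit() before handing the frame to
 * the hardware. "my_hw_can_csum()" is a hypothetical capability check.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL && !my_hw_can_csum(skb)) {
 *		if (skb_checksum_help(skb))
 *			goto drop;	 could not complete the checksum
 *	}
 */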
1957
1958 /**
1959 * skb_gso_segment - Perform segmentation on skb.
1960 * @skb: buffer to segment
1961 * @features: features for the output path (see dev->features)
1962 *
1963 * This function segments the given skb and returns a list of segments.
1964 *
1965 * It may return NULL if the skb requires no segmentation. This is
1966 * only possible when GSO is used for verifying header integrity.
1967 */
1968 struct sk_buff *skb_gso_segment(struct sk_buff *skb,
1969 netdev_features_t features)
1970 {
1971 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
1972 struct packet_type *ptype;
1973 __be16 type = skb->protocol;
1974 int vlan_depth = ETH_HLEN;
1975 int err;
1976
1977 while (type == htons(ETH_P_8021Q)) {
1978 struct vlan_hdr *vh;
1979
1980 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
1981 return ERR_PTR(-EINVAL);
1982
1983 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
1984 type = vh->h_vlan_encapsulated_proto;
1985 vlan_depth += VLAN_HLEN;
1986 }
1987
1988 skb_reset_mac_header(skb);
1989 skb->mac_len = skb->network_header - skb->mac_header;
1990 __skb_pull(skb, skb->mac_len);
1991
1992 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
1993 skb_warn_bad_offload(skb);
1994
1995 if (skb_header_cloned(skb) &&
1996 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
1997 return ERR_PTR(err);
1998 }
1999
2000 rcu_read_lock();
2001 list_for_each_entry_rcu(ptype,
2002 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
2003 if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
2004 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
2005 err = ptype->gso_send_check(skb);
2006 segs = ERR_PTR(err);
2007 if (err || skb_gso_ok(skb, features))
2008 break;
2009 __skb_push(skb, (skb->data -
2010 skb_network_header(skb)));
2011 }
2012 segs = ptype->gso_segment(skb, features);
2013 break;
2014 }
2015 }
2016 rcu_read_unlock();
2017
2018 __skb_push(skb, skb->data - skb_mac_header(skb));
2019
2020 return segs;
2021 }
2022 EXPORT_SYMBOL(skb_gso_segment);
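/*
 * Illustrative sketch (hedged, not part of this file): how a caller that must
 * perform software GSO itself would use skb_gso_segment() and walk the
 * returned list. The transmit helper "my_xmit_one()" is hypothetical.
 *
 *	struct sk_buff *segs, *nskb;
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (!segs)			 header verification only
 *		return my_xmit_one(skb);
 *
 *	consume_skb(skb);		 original is replaced by the segments
 *	while (segs) {
 *		nskb = segs;
 *		segs = segs->next;
 *		nskb->next = NULL;
 *		my_xmit_one(nskb);
 *	}
 */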
2023
2024 /* Take action when hardware reception checksum errors are detected. */
2025 #ifdef CONFIG_BUG
2026 void netdev_rx_csum_fault(struct net_device *dev)
2027 {
2028 if (net_ratelimit()) {
2029 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2030 dump_stack();
2031 }
2032 }
2033 EXPORT_SYMBOL(netdev_rx_csum_fault);
2034 #endif
2035
2036 /* Actually, we should eliminate this check as soon as we know that:
2037 * 1. An IOMMU is present and can map all of the memory.
2038 * 2. No high memory really exists on this machine.
2039 */
2040
2041 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2042 {
2043 #ifdef CONFIG_HIGHMEM
2044 int i;
2045 if (!(dev->features & NETIF_F_HIGHDMA)) {
2046 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2047 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2048 if (PageHighMem(skb_frag_page(frag)))
2049 return 1;
2050 }
2051 }
2052
2053 if (PCI_DMA_BUS_IS_PHYS) {
2054 struct device *pdev = dev->dev.parent;
2055
2056 if (!pdev)
2057 return 0;
2058 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2059 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2060 dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2061 if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2062 return 1;
2063 }
2064 }
2065 #endif
2066 return 0;
2067 }
2068
2069 struct dev_gso_cb {
2070 void (*destructor)(struct sk_buff *skb);
2071 };
2072
2073 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
2074
2075 static void dev_gso_skb_destructor(struct sk_buff *skb)
2076 {
2077 struct dev_gso_cb *cb;
2078
2079 do {
2080 struct sk_buff *nskb = skb->next;
2081
2082 skb->next = nskb->next;
2083 nskb->next = NULL;
2084 kfree_skb(nskb);
2085 } while (skb->next);
2086
2087 cb = DEV_GSO_CB(skb);
2088 if (cb->destructor)
2089 cb->destructor(skb);
2090 }
2091
2092 /**
2093 * dev_gso_segment - Perform emulated hardware segmentation on skb.
2094 * @skb: buffer to segment
2095 * @features: device features as applicable to this skb
2096 *
2097 * This function segments the given skb and stores the list of segments
2098 * in skb->next.
2099 */
2100 static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2101 {
2102 struct sk_buff *segs;
2103
2104 segs = skb_gso_segment(skb, features);
2105
2106 /* Verifying header integrity only. */
2107 if (!segs)
2108 return 0;
2109
2110 if (IS_ERR(segs))
2111 return PTR_ERR(segs);
2112
2113 skb->next = segs;
2114 DEV_GSO_CB(skb)->destructor = skb->destructor;
2115 skb->destructor = dev_gso_skb_destructor;
2116
2117 return 0;
2118 }
2119
2120 /*
2121 * Try to orphan skb early, right before transmission by the device.
2122 * We cannot orphan skb if tx timestamp is requested or the sk-reference
2123 * is needed at the driver level for other reasons, e.g. see net/can/raw.c
2124 */
2125 static inline void skb_orphan_try(struct sk_buff *skb)
2126 {
2127 struct sock *sk = skb->sk;
2128
2129 if (sk && !skb_shinfo(skb)->tx_flags) {
2130 /* skb_tx_hash() won't be able to get the sk.
2131 * We copy sk_hash into skb->rxhash.
2132 */
2133 if (!skb->rxhash)
2134 skb->rxhash = sk->sk_hash;
2135 skb_orphan(skb);
2136 }
2137 }
2138
2139 static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2140 {
2141 return ((features & NETIF_F_GEN_CSUM) ||
2142 ((features & NETIF_F_V4_CSUM) &&
2143 protocol == htons(ETH_P_IP)) ||
2144 ((features & NETIF_F_V6_CSUM) &&
2145 protocol == htons(ETH_P_IPV6)) ||
2146 ((features & NETIF_F_FCOE_CRC) &&
2147 protocol == htons(ETH_P_FCOE)));
2148 }
2149
2150 static netdev_features_t harmonize_features(struct sk_buff *skb,
2151 __be16 protocol, netdev_features_t features)
2152 {
2153 if (!can_checksum_protocol(features, protocol)) {
2154 features &= ~NETIF_F_ALL_CSUM;
2155 features &= ~NETIF_F_SG;
2156 } else if (illegal_highdma(skb->dev, skb)) {
2157 features &= ~NETIF_F_SG;
2158 }
2159
2160 return features;
2161 }
2162
2163 netdev_features_t netif_skb_features(struct sk_buff *skb)
2164 {
2165 __be16 protocol = skb->protocol;
2166 netdev_features_t features = skb->dev->features;
2167
2168 if (protocol == htons(ETH_P_8021Q)) {
2169 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
2170 protocol = veh->h_vlan_encapsulated_proto;
2171 } else if (!vlan_tx_tag_present(skb)) {
2172 return harmonize_features(skb, protocol, features);
2173 }
2174
2175 features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_TX);
2176
2177 if (protocol != htons(ETH_P_8021Q)) {
2178 return harmonize_features(skb, protocol, features);
2179 } else {
2180 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
2181 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_TX;
2182 return harmonize_features(skb, protocol, features);
2183 }
2184 }
2185 EXPORT_SYMBOL(netif_skb_features);
2186
2187 /*
2188 * Returns true if either:
2189 * 1. skb has frag_list and the device doesn't support FRAGLIST, or
2190 * 2. skb is fragmented and the device does not support SG, or if
2191 * at least one of the fragments is in highmem and the device does not
2192 * support DMA from it.
2193 */
2194 static inline int skb_needs_linearize(struct sk_buff *skb,
2195 int features)
2196 {
2197 return skb_is_nonlinear(skb) &&
2198 ((skb_has_frag_list(skb) &&
2199 !(features & NETIF_F_FRAGLIST)) ||
2200 (skb_shinfo(skb)->nr_frags &&
2201 !(features & NETIF_F_SG)));
2202 }
2203
2204 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2205 struct netdev_queue *txq)
2206 {
2207 const struct net_device_ops *ops = dev->netdev_ops;
2208 int rc = NETDEV_TX_OK;
2209 unsigned int skb_len;
2210
2211 if (likely(!skb->next)) {
2212 netdev_features_t features;
2213
2214 /*
2215 * If the device doesn't need skb->dst, release it right now while
2216 * it's still hot in this CPU's cache.
2217 */
2218 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2219 skb_dst_drop(skb);
2220
2221 if (!list_empty(&ptype_all))
2222 dev_queue_xmit_nit(skb, dev);
2223
2224 skb_orphan_try(skb);
2225
2226 features = netif_skb_features(skb);
2227
2228 if (vlan_tx_tag_present(skb) &&
2229 !(features & NETIF_F_HW_VLAN_TX)) {
2230 skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
2231 if (unlikely(!skb))
2232 goto out;
2233
2234 skb->vlan_tci = 0;
2235 }
2236
2237 if (netif_needs_gso(skb, features)) {
2238 if (unlikely(dev_gso_segment(skb, features)))
2239 goto out_kfree_skb;
2240 if (skb->next)
2241 goto gso;
2242 } else {
2243 if (skb_needs_linearize(skb, features) &&
2244 __skb_linearize(skb))
2245 goto out_kfree_skb;
2246
2247 /* If packet is not checksummed and device does not
2248 * support checksumming for this protocol, complete
2249 * checksumming here.
2250 */
2251 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2252 skb_set_transport_header(skb,
2253 skb_checksum_start_offset(skb));
2254 if (!(features & NETIF_F_ALL_CSUM) &&
2255 skb_checksum_help(skb))
2256 goto out_kfree_skb;
2257 }
2258 }
2259
2260 skb_len = skb->len;
2261 rc = ops->ndo_start_xmit(skb, dev);
2262 trace_net_dev_xmit(skb, rc, dev, skb_len);
2263 if (rc == NETDEV_TX_OK)
2264 txq_trans_update(txq);
2265 return rc;
2266 }
2267
2268 gso:
2269 do {
2270 struct sk_buff *nskb = skb->next;
2271
2272 skb->next = nskb->next;
2273 nskb->next = NULL;
2274
2275 /*
2276 * If the device doesn't need nskb->dst, release it right now while
2277 * it's still hot in this CPU's cache.
2278 */
2279 if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2280 skb_dst_drop(nskb);
2281
2282 skb_len = nskb->len;
2283 rc = ops->ndo_start_xmit(nskb, dev);
2284 trace_net_dev_xmit(nskb, rc, dev, skb_len);
2285 if (unlikely(rc != NETDEV_TX_OK)) {
2286 if (rc & ~NETDEV_TX_MASK)
2287 goto out_kfree_gso_skb;
2288 nskb->next = skb->next;
2289 skb->next = nskb;
2290 return rc;
2291 }
2292 txq_trans_update(txq);
2293 if (unlikely(netif_xmit_stopped(txq) && skb->next))
2294 return NETDEV_TX_BUSY;
2295 } while (skb->next);
2296
2297 out_kfree_gso_skb:
2298 if (likely(skb->next == NULL))
2299 skb->destructor = DEV_GSO_CB(skb)->destructor;
2300 out_kfree_skb:
2301 kfree_skb(skb);
2302 out:
2303 return rc;
2304 }
2305
2306 static u32 hashrnd __read_mostly;
2307
2308 /*
2309 * Returns a Tx hash based on the given packet descriptor and the number
2310 * of Tx queues to be used as a distribution range.
2311 */
2312 u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
2313 unsigned int num_tx_queues)
2314 {
2315 u32 hash;
2316 u16 qoffset = 0;
2317 u16 qcount = num_tx_queues;
2318
2319 if (skb_rx_queue_recorded(skb)) {
2320 hash = skb_get_rx_queue(skb);
2321 while (unlikely(hash >= num_tx_queues))
2322 hash -= num_tx_queues;
2323 return hash;
2324 }
2325
2326 if (dev->num_tc) {
2327 u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
2328 qoffset = dev->tc_to_txq[tc].offset;
2329 qcount = dev->tc_to_txq[tc].count;
2330 }
2331
2332 if (skb->sk && skb->sk->sk_hash)
2333 hash = skb->sk->sk_hash;
2334 else
2335 hash = (__force u16) skb->protocol ^ skb->rxhash;
2336 hash = jhash_1word(hash, hashrnd);
2337
2338 return (u16) (((u64) hash * qcount) >> 32) + qoffset;
2339 }
2340 EXPORT_SYMBOL(__skb_tx_hash);
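/*
 * Illustrative sketch (assumption, not part of this file): a multiqueue
 * driver implementing ndo_select_queue() can delegate to __skb_tx_hash()
 * when it only wants flows hashed over a subset of its queues. "my_priv"
 * and "num_data_queues" are hypothetical.
 *
 *	static u16 my_select_queue(struct net_device *dev, struct sk_buff *skb)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		 spread flows over the first num_data_queues TX queues only
 *		return __skb_tx_hash(dev, skb, priv->num_data_queues);
 *	}
 */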
2341
2342 static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
2343 {
2344 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
2345 net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
2346 dev->name, queue_index,
2347 dev->real_num_tx_queues);
2348 return 0;
2349 }
2350 return queue_index;
2351 }
2352
2353 static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
2354 {
2355 #ifdef CONFIG_XPS
2356 struct xps_dev_maps *dev_maps;
2357 struct xps_map *map;
2358 int queue_index = -1;
2359
2360 rcu_read_lock();
2361 dev_maps = rcu_dereference(dev->xps_maps);
2362 if (dev_maps) {
2363 map = rcu_dereference(
2364 dev_maps->cpu_map[raw_smp_processor_id()]);
2365 if (map) {
2366 if (map->len == 1)
2367 queue_index = map->queues[0];
2368 else {
2369 u32 hash;
2370 if (skb->sk && skb->sk->sk_hash)
2371 hash = skb->sk->sk_hash;
2372 else
2373 hash = (__force u16) skb->protocol ^
2374 skb->rxhash;
2375 hash = jhash_1word(hash, hashrnd);
2376 queue_index = map->queues[
2377 ((u64)hash * map->len) >> 32];
2378 }
2379 if (unlikely(queue_index >= dev->real_num_tx_queues))
2380 queue_index = -1;
2381 }
2382 }
2383 rcu_read_unlock();
2384
2385 return queue_index;
2386 #else
2387 return -1;
2388 #endif
2389 }
2390
2391 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
2392 struct sk_buff *skb)
2393 {
2394 int queue_index;
2395 const struct net_device_ops *ops = dev->netdev_ops;
2396
2397 if (dev->real_num_tx_queues == 1)
2398 queue_index = 0;
2399 else if (ops->ndo_select_queue) {
2400 queue_index = ops->ndo_select_queue(dev, skb);
2401 queue_index = dev_cap_txqueue(dev, queue_index);
2402 } else {
2403 struct sock *sk = skb->sk;
2404 queue_index = sk_tx_queue_get(sk);
2405
2406 if (queue_index < 0 || skb->ooo_okay ||
2407 queue_index >= dev->real_num_tx_queues) {
2408 int old_index = queue_index;
2409
2410 queue_index = get_xps_queue(dev, skb);
2411 if (queue_index < 0)
2412 queue_index = skb_tx_hash(dev, skb);
2413
2414 if (queue_index != old_index && sk) {
2415 struct dst_entry *dst =
2416 rcu_dereference_check(sk->sk_dst_cache, 1);
2417
2418 if (dst && skb_dst(skb) == dst)
2419 sk_tx_queue_set(sk, queue_index);
2420 }
2421 }
2422 }
2423
2424 skb_set_queue_mapping(skb, queue_index);
2425 return netdev_get_tx_queue(dev, queue_index);
2426 }
2427
2428 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2429 struct net_device *dev,
2430 struct netdev_queue *txq)
2431 {
2432 spinlock_t *root_lock = qdisc_lock(q);
2433 bool contended;
2434 int rc;
2435
2436 qdisc_skb_cb(skb)->pkt_len = skb->len;
2437 qdisc_calculate_pkt_len(skb, q);
2438 /*
2439 * Heuristic to force contended enqueues to serialize on a
2440 * separate lock before trying to get the qdisc main lock.
2441 * This permits the __QDISC_STATE_RUNNING owner to get the lock more often
2442 * and dequeue packets faster.
2443 */
2444 contended = qdisc_is_running(q);
2445 if (unlikely(contended))
2446 spin_lock(&q->busylock);
2447
2448 spin_lock(root_lock);
2449 if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2450 kfree_skb(skb);
2451 rc = NET_XMIT_DROP;
2452 } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2453 qdisc_run_begin(q)) {
2454 /*
2455 * This is a work-conserving queue; there are no old skbs
2456 * waiting to be sent out; and the qdisc is not running -
2457 * xmit the skb directly.
2458 */
2459 if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
2460 skb_dst_force(skb);
2461
2462 qdisc_bstats_update(q, skb);
2463
2464 if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
2465 if (unlikely(contended)) {
2466 spin_unlock(&q->busylock);
2467 contended = false;
2468 }
2469 __qdisc_run(q);
2470 } else
2471 qdisc_run_end(q);
2472
2473 rc = NET_XMIT_SUCCESS;
2474 } else {
2475 skb_dst_force(skb);
2476 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2477 if (qdisc_run_begin(q)) {
2478 if (unlikely(contended)) {
2479 spin_unlock(&q->busylock);
2480 contended = false;
2481 }
2482 __qdisc_run(q);
2483 }
2484 }
2485 spin_unlock(root_lock);
2486 if (unlikely(contended))
2487 spin_unlock(&q->busylock);
2488 return rc;
2489 }
2490
2491 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
2492 static void skb_update_prio(struct sk_buff *skb)
2493 {
2494 struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2495
2496 if ((!skb->priority) && (skb->sk) && map)
2497 skb->priority = map->priomap[skb->sk->sk_cgrp_prioidx];
2498 }
2499 #else
2500 #define skb_update_prio(skb)
2501 #endif
2502
2503 static DEFINE_PER_CPU(int, xmit_recursion);
2504 #define RECURSION_LIMIT 10
2505
2506 /**
2507 * dev_queue_xmit - transmit a buffer
2508 * @skb: buffer to transmit
2509 *
2510 * Queue a buffer for transmission to a network device. The caller must
2511 * have set the device and priority and built the buffer before calling
2512 * this function. The function can be called from an interrupt.
2513 *
2514 * A negative errno code is returned on a failure. A success does not
2515 * guarantee the frame will be transmitted as it may be dropped due
2516 * to congestion or traffic shaping.
2517 *
2518 * -----------------------------------------------------------------------------------
2519 * I notice this method can also return errors from the queue disciplines,
2520 * including NET_XMIT_DROP, which is a positive value. So, errors can also
2521 * be positive.
2522 *
2523 * Regardless of the return value, the skb is consumed, so it is currently
2524 * difficult to retry a send to this method. (You can bump the ref count
2525 * before sending to hold a reference for retry if you are careful.)
2526 *
2527 * When calling this method, interrupts MUST be enabled. This is because
2528 * the BH enable code must have IRQs enabled so that it will not deadlock.
2529 * --BLG
2530 */
2531 int dev_queue_xmit(struct sk_buff *skb)
2532 {
2533 struct net_device *dev = skb->dev;
2534 struct netdev_queue *txq;
2535 struct Qdisc *q;
2536 int rc = -ENOMEM;
2537
2538 /* Disable soft irqs for various locks below. Also
2539 * stops preemption for RCU.
2540 */
2541 rcu_read_lock_bh();
2542
2543 skb_update_prio(skb);
2544
2545 txq = dev_pick_tx(dev, skb);
2546 q = rcu_dereference_bh(txq->qdisc);
2547
2548 #ifdef CONFIG_NET_CLS_ACT
2549 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2550 #endif
2551 trace_net_dev_queue(skb);
2552 if (q->enqueue) {
2553 rc = __dev_xmit_skb(skb, q, dev, txq);
2554 goto out;
2555 }
2556
2557 /* The device has no queue. Common case for software devices:
2558 loopback, all sorts of tunnels...
2559
2560 Really, it is unlikely that netif_tx_lock protection is necessary
2561 here. (f.e. loopback and IP tunnels are clean ignoring statistics
2562 counters.)
2563 However, it is possible that they rely on the protection
2564 we provide here.
2565
2566 So check this and take the lock. It is not prone to deadlocks.
2567 Or drop the noqueue qdisc entirely; it is even simpler 8)
2568 */
2569 if (dev->flags & IFF_UP) {
2570 int cpu = smp_processor_id(); /* ok because BHs are off */
2571
2572 if (txq->xmit_lock_owner != cpu) {
2573
2574 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2575 goto recursion_alert;
2576
2577 HARD_TX_LOCK(dev, txq, cpu);
2578
2579 if (!netif_xmit_stopped(txq)) {
2580 __this_cpu_inc(xmit_recursion);
2581 rc = dev_hard_start_xmit(skb, dev, txq);
2582 __this_cpu_dec(xmit_recursion);
2583 if (dev_xmit_complete(rc)) {
2584 HARD_TX_UNLOCK(dev, txq);
2585 goto out;
2586 }
2587 }
2588 HARD_TX_UNLOCK(dev, txq);
2589 net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
2590 dev->name);
2591 } else {
2592 /* Recursion is detected! It is possible,
2593 * unfortunately
2594 */
2595 recursion_alert:
2596 net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
2597 dev->name);
2598 }
2599 }
2600
2601 rc = -ENETDOWN;
2602 rcu_read_unlock_bh();
2603
2604 kfree_skb(skb);
2605 return rc;
2606 out:
2607 rcu_read_unlock_bh();
2608 return rc;
2609 }
2610 EXPORT_SYMBOL(dev_queue_xmit);
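/*
 * Illustrative sketch (hedged, not part of this file): the minimal transmit
 * path of a protocol or tunnel module handing a fully built frame to
 * dev_queue_xmit(). "dev" and "daddr" (the destination hardware address)
 * are assumed locals; the skb is consumed whether or not it is sent.
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_IP);
 *	if (dev_hard_header(skb, dev, ETH_P_IP, daddr, dev->dev_addr,
 *			    skb->len) < 0) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 *	return dev_queue_xmit(skb);	 may return positive NET_XMIT_* codes
 */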
2611
2612
2613 /*=======================================================================
2614 Receiver routines
2615 =======================================================================*/
2616
2617 int netdev_max_backlog __read_mostly = 1000;
2618 int netdev_tstamp_prequeue __read_mostly = 1;
2619 int netdev_budget __read_mostly = 300;
2620 int weight_p __read_mostly = 64; /* old backlog weight */
2621
2622 /* Called with irq disabled */
2623 static inline void ____napi_schedule(struct softnet_data *sd,
2624 struct napi_struct *napi)
2625 {
2626 list_add_tail(&napi->poll_list, &sd->poll_list);
2627 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2628 }
2629
2630 /*
2631 * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
2632 * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
2633 * on success; zero indicates no valid hash. Also, sets l4_rxhash in skb
2634 * if hash is a canonical 4-tuple hash over transport ports.
2635 */
2636 void __skb_get_rxhash(struct sk_buff *skb)
2637 {
2638 struct flow_keys keys;
2639 u32 hash;
2640
2641 if (!skb_flow_dissect(skb, &keys))
2642 return;
2643
2644 if (keys.ports) {
2645 if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
2646 swap(keys.port16[0], keys.port16[1]);
2647 skb->l4_rxhash = 1;
2648 }
2649
2650 /* get a consistent hash (same value on both flow directions) */
2651 if ((__force u32)keys.dst < (__force u32)keys.src)
2652 swap(keys.dst, keys.src);
2653
2654 hash = jhash_3words((__force u32)keys.dst,
2655 (__force u32)keys.src,
2656 (__force u32)keys.ports, hashrnd);
2657 if (!hash)
2658 hash = 1;
2659
2660 skb->rxhash = hash;
2661 }
2662 EXPORT_SYMBOL(__skb_get_rxhash);
2663
2664 #ifdef CONFIG_RPS
2665
2666 /* One global table that all flow-based protocols share. */
2667 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
2668 EXPORT_SYMBOL(rps_sock_flow_table);
2669
2670 struct static_key rps_needed __read_mostly;
2671
2672 static struct rps_dev_flow *
2673 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2674 struct rps_dev_flow *rflow, u16 next_cpu)
2675 {
2676 if (next_cpu != RPS_NO_CPU) {
2677 #ifdef CONFIG_RFS_ACCEL
2678 struct netdev_rx_queue *rxqueue;
2679 struct rps_dev_flow_table *flow_table;
2680 struct rps_dev_flow *old_rflow;
2681 u32 flow_id;
2682 u16 rxq_index;
2683 int rc;
2684
2685 /* Should we steer this flow to a different hardware queue? */
2686 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
2687 !(dev->features & NETIF_F_NTUPLE))
2688 goto out;
2689 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
2690 if (rxq_index == skb_get_rx_queue(skb))
2691 goto out;
2692
2693 rxqueue = dev->_rx + rxq_index;
2694 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2695 if (!flow_table)
2696 goto out;
2697 flow_id = skb->rxhash & flow_table->mask;
2698 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
2699 rxq_index, flow_id);
2700 if (rc < 0)
2701 goto out;
2702 old_rflow = rflow;
2703 rflow = &flow_table->flows[flow_id];
2704 rflow->filter = rc;
2705 if (old_rflow->filter == rflow->filter)
2706 old_rflow->filter = RPS_NO_FILTER;
2707 out:
2708 #endif
2709 rflow->last_qtail =
2710 per_cpu(softnet_data, next_cpu).input_queue_head;
2711 }
2712
2713 rflow->cpu = next_cpu;
2714 return rflow;
2715 }
2716
2717 /*
2718 * get_rps_cpu is called from netif_receive_skb and returns the target
2719 * CPU from the RPS map of the receiving queue for a given skb.
2720 * rcu_read_lock must be held on entry.
2721 */
2722 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2723 struct rps_dev_flow **rflowp)
2724 {
2725 struct netdev_rx_queue *rxqueue;
2726 struct rps_map *map;
2727 struct rps_dev_flow_table *flow_table;
2728 struct rps_sock_flow_table *sock_flow_table;
2729 int cpu = -1;
2730 u16 tcpu;
2731
2732 if (skb_rx_queue_recorded(skb)) {
2733 u16 index = skb_get_rx_queue(skb);
2734 if (unlikely(index >= dev->real_num_rx_queues)) {
2735 WARN_ONCE(dev->real_num_rx_queues > 1,
2736 "%s received packet on queue %u, but number "
2737 "of RX queues is %u\n",
2738 dev->name, index, dev->real_num_rx_queues);
2739 goto done;
2740 }
2741 rxqueue = dev->_rx + index;
2742 } else
2743 rxqueue = dev->_rx;
2744
2745 map = rcu_dereference(rxqueue->rps_map);
2746 if (map) {
2747 if (map->len == 1 &&
2748 !rcu_access_pointer(rxqueue->rps_flow_table)) {
2749 tcpu = map->cpus[0];
2750 if (cpu_online(tcpu))
2751 cpu = tcpu;
2752 goto done;
2753 }
2754 } else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
2755 goto done;
2756 }
2757
2758 skb_reset_network_header(skb);
2759 if (!skb_get_rxhash(skb))
2760 goto done;
2761
2762 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2763 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2764 if (flow_table && sock_flow_table) {
2765 u16 next_cpu;
2766 struct rps_dev_flow *rflow;
2767
2768 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2769 tcpu = rflow->cpu;
2770
2771 next_cpu = sock_flow_table->ents[skb->rxhash &
2772 sock_flow_table->mask];
2773
2774 /*
2775 * If the desired CPU (where last recvmsg was done) is
2776 * different from current CPU (one in the rx-queue flow
2777 * table entry), switch if one of the following holds:
2778 * - Current CPU is unset (equal to RPS_NO_CPU).
2779 * - Current CPU is offline.
2780 * - The current CPU's queue tail has advanced beyond the
2781 * last packet that was enqueued using this table entry.
2782 * This guarantees that all previous packets for the flow
2783 * have been dequeued, thus preserving in order delivery.
2784 */
2785 if (unlikely(tcpu != next_cpu) &&
2786 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2787 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2788 rflow->last_qtail)) >= 0))
2789 rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
2790
2791 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2792 *rflowp = rflow;
2793 cpu = tcpu;
2794 goto done;
2795 }
2796 }
2797
2798 if (map) {
2799 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2800
2801 if (cpu_online(tcpu)) {
2802 cpu = tcpu;
2803 goto done;
2804 }
2805 }
2806
2807 done:
2808 return cpu;
2809 }
2810
2811 #ifdef CONFIG_RFS_ACCEL
2812
2813 /**
2814 * rps_may_expire_flow - check whether an RFS hardware filter may be removed
2815 * @dev: Device on which the filter was set
2816 * @rxq_index: RX queue index
2817 * @flow_id: Flow ID passed to ndo_rx_flow_steer()
2818 * @filter_id: Filter ID returned by ndo_rx_flow_steer()
2819 *
2820 * Drivers that implement ndo_rx_flow_steer() should periodically call
2821 * this function for each installed filter and remove the filters for
2822 * which it returns %true.
2823 */
2824 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
2825 u32 flow_id, u16 filter_id)
2826 {
2827 struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
2828 struct rps_dev_flow_table *flow_table;
2829 struct rps_dev_flow *rflow;
2830 bool expire = true;
2831 int cpu;
2832
2833 rcu_read_lock();
2834 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2835 if (flow_table && flow_id <= flow_table->mask) {
2836 rflow = &flow_table->flows[flow_id];
2837 cpu = ACCESS_ONCE(rflow->cpu);
2838 if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
2839 ((int)(per_cpu(softnet_data, cpu).input_queue_head -
2840 rflow->last_qtail) <
2841 (int)(10 * flow_table->mask)))
2842 expire = false;
2843 }
2844 rcu_read_unlock();
2845 return expire;
2846 }
2847 EXPORT_SYMBOL(rps_may_expire_flow);
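/*
 * Illustrative sketch (assumption, not part of this file): a driver with
 * accelerated RFS would periodically scan its installed hardware filters and
 * remove the ones rps_may_expire_flow() reports as stale. The filter table
 * ("my_filter"), its fields, and "my_hw_remove_filter()" are hypothetical;
 * here the filter index doubles as the filter_id returned earlier from
 * ndo_rx_flow_steer().
 *
 *	for (i = 0; i < priv->n_filters; i++) {
 *		struct my_filter *f = &priv->filters[i];
 *
 *		if (!f->in_use)
 *			continue;
 *		if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *					f->flow_id, i)) {
 *			my_hw_remove_filter(priv, f);
 *			f->in_use = false;
 *		}
 *	}
 */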
2848
2849 #endif /* CONFIG_RFS_ACCEL */
2850
2851 /* Called from hardirq (IPI) context */
2852 static void rps_trigger_softirq(void *data)
2853 {
2854 struct softnet_data *sd = data;
2855
2856 ____napi_schedule(sd, &sd->backlog);
2857 sd->received_rps++;
2858 }
2859
2860 #endif /* CONFIG_RPS */
2861
2862 /*
2863 * Check whether this softnet_data structure belongs to another CPU.
2864 * If yes, queue it to our IPI list and return 1.
2865 * If no, return 0.
2866 */
2867 static int rps_ipi_queued(struct softnet_data *sd)
2868 {
2869 #ifdef CONFIG_RPS
2870 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2871
2872 if (sd != mysd) {
2873 sd->rps_ipi_next = mysd->rps_ipi_list;
2874 mysd->rps_ipi_list = sd;
2875
2876 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2877 return 1;
2878 }
2879 #endif /* CONFIG_RPS */
2880 return 0;
2881 }
2882
2883 /*
2884 * enqueue_to_backlog is called to queue an skb to a per-CPU backlog
2885 * queue (may be a remote CPU queue).
2886 */
2887 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2888 unsigned int *qtail)
2889 {
2890 struct softnet_data *sd;
2891 unsigned long flags;
2892
2893 sd = &per_cpu(softnet_data, cpu);
2894
2895 local_irq_save(flags);
2896
2897 rps_lock(sd);
2898 if (skb_queue_len(&sd->input_pkt_queue) <= netdev_max_backlog) {
2899 if (skb_queue_len(&sd->input_pkt_queue)) {
2900 enqueue:
2901 __skb_queue_tail(&sd->input_pkt_queue, skb);
2902 input_queue_tail_incr_save(sd, qtail);
2903 rps_unlock(sd);
2904 local_irq_restore(flags);
2905 return NET_RX_SUCCESS;
2906 }
2907
2908 /* Schedule NAPI for the backlog device.
2909 * We can use a non-atomic operation since we own the queue lock.
2910 */
2911 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
2912 if (!rps_ipi_queued(sd))
2913 ____napi_schedule(sd, &sd->backlog);
2914 }
2915 goto enqueue;
2916 }
2917
2918 sd->dropped++;
2919 rps_unlock(sd);
2920
2921 local_irq_restore(flags);
2922
2923 atomic_long_inc(&skb->dev->rx_dropped);
2924 kfree_skb(skb);
2925 return NET_RX_DROP;
2926 }
2927
2928 /**
2929 * netif_rx - post buffer to the network code
2930 * @skb: buffer to post
2931 *
2932 * This function receives a packet from a device driver and queues it for
2933 * the upper (protocol) levels to process. It always succeeds. The buffer
2934 * may be dropped during processing for congestion control or by the
2935 * protocol layers.
2936 *
2937 * return values:
2938 * NET_RX_SUCCESS (no congestion)
2939 * NET_RX_DROP (packet was dropped)
2940 *
2941 */
2942
2943 int netif_rx(struct sk_buff *skb)
2944 {
2945 int ret;
2946
2947 /* if netpoll wants it, pretend we never saw it */
2948 if (netpoll_rx(skb))
2949 return NET_RX_DROP;
2950
2951 net_timestamp_check(netdev_tstamp_prequeue, skb);
2952
2953 trace_netif_rx(skb);
2954 #ifdef CONFIG_RPS
2955 if (static_key_false(&rps_needed)) {
2956 struct rps_dev_flow voidflow, *rflow = &voidflow;
2957 int cpu;
2958
2959 preempt_disable();
2960 rcu_read_lock();
2961
2962 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2963 if (cpu < 0)
2964 cpu = smp_processor_id();
2965
2966 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2967
2968 rcu_read_unlock();
2969 preempt_enable();
2970 } else
2971 #endif
2972 {
2973 unsigned int qtail;
2974 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2975 put_cpu();
2976 }
2977 return ret;
2978 }
2979 EXPORT_SYMBOL(netif_rx);
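/*
 * Illustrative sketch (hedged, not part of this file): a classic non-NAPI
 * receive interrupt handler builds an skb and hands it to netif_rx(). The
 * "my_copy_rx_data()" helper, "rx_buf" and "pkt_len" are hypothetical.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (!skb) {
 *		dev->stats.rx_dropped++;
 *		return;
 *	}
 *	my_copy_rx_data(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);			 queued to the per-CPU backlog
 */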
2980
2981 int netif_rx_ni(struct sk_buff *skb)
2982 {
2983 int err;
2984
2985 preempt_disable();
2986 err = netif_rx(skb);
2987 if (local_softirq_pending())
2988 do_softirq();
2989 preempt_enable();
2990
2991 return err;
2992 }
2993 EXPORT_SYMBOL(netif_rx_ni);
2994
2995 static void net_tx_action(struct softirq_action *h)
2996 {
2997 struct softnet_data *sd = &__get_cpu_var(softnet_data);
2998
2999 if (sd->completion_queue) {
3000 struct sk_buff *clist;
3001
3002 local_irq_disable();
3003 clist = sd->completion_queue;
3004 sd->completion_queue = NULL;
3005 local_irq_enable();
3006
3007 while (clist) {
3008 struct sk_buff *skb = clist;
3009 clist = clist->next;
3010
3011 WARN_ON(atomic_read(&skb->users));
3012 trace_kfree_skb(skb, net_tx_action);
3013 __kfree_skb(skb);
3014 }
3015 }
3016
3017 if (sd->output_queue) {
3018 struct Qdisc *head;
3019
3020 local_irq_disable();
3021 head = sd->output_queue;
3022 sd->output_queue = NULL;
3023 sd->output_queue_tailp = &sd->output_queue;
3024 local_irq_enable();
3025
3026 while (head) {
3027 struct Qdisc *q = head;
3028 spinlock_t *root_lock;
3029
3030 head = head->next_sched;
3031
3032 root_lock = qdisc_lock(q);
3033 if (spin_trylock(root_lock)) {
3034 smp_mb__before_clear_bit();
3035 clear_bit(__QDISC_STATE_SCHED,
3036 &q->state);
3037 qdisc_run(q);
3038 spin_unlock(root_lock);
3039 } else {
3040 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3041 &q->state)) {
3042 __netif_reschedule(q);
3043 } else {
3044 smp_mb__before_clear_bit();
3045 clear_bit(__QDISC_STATE_SCHED,
3046 &q->state);
3047 }
3048 }
3049 }
3050 }
3051 }
3052
3053 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3054 (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3055 /* This hook is defined here for ATM LANE */
3056 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3057 unsigned char *addr) __read_mostly;
3058 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3059 #endif
3060
3061 #ifdef CONFIG_NET_CLS_ACT
3062 /* TODO: Maybe we should just force sch_ingress to be compiled in
3063 * when CONFIG_NET_CLS_ACT is? Otherwise we pay a compare and 2 extra
3064 * stores for nothing when sch_ingress is not loaded but
3065 * CONFIG_NET_CLS_ACT is enabled.
3066 * NOTE: This doesn't stop any functionality; if you don't have
3067 * the ingress scheduler, you just can't add policies on ingress.
3068 *
3069 */
3070 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3071 {
3072 struct net_device *dev = skb->dev;
3073 u32 ttl = G_TC_RTTL(skb->tc_verd);
3074 int result = TC_ACT_OK;
3075 struct Qdisc *q;
3076
3077 if (unlikely(MAX_RED_LOOP < ttl++)) {
3078 net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
3079 skb->skb_iif, dev->ifindex);
3080 return TC_ACT_SHOT;
3081 }
3082
3083 skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3084 skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3085
3086 q = rxq->qdisc;
3087 if (q != &noop_qdisc) {
3088 spin_lock(qdisc_lock(q));
3089 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3090 result = qdisc_enqueue_root(skb, q);
3091 spin_unlock(qdisc_lock(q));
3092 }
3093
3094 return result;
3095 }
3096
3097 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3098 struct packet_type **pt_prev,
3099 int *ret, struct net_device *orig_dev)
3100 {
3101 struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3102
3103 if (!rxq || rxq->qdisc == &noop_qdisc)
3104 goto out;
3105
3106 if (*pt_prev) {
3107 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3108 *pt_prev = NULL;
3109 }
3110
3111 switch (ing_filter(skb, rxq)) {
3112 case TC_ACT_SHOT:
3113 case TC_ACT_STOLEN:
3114 kfree_skb(skb);
3115 return NULL;
3116 }
3117
3118 out:
3119 skb->tc_verd = 0;
3120 return skb;
3121 }
3122 #endif
3123
3124 /**
3125 * netdev_rx_handler_register - register receive handler
3126 * @dev: device to register a handler for
3127 * @rx_handler: receive handler to register
3128 * @rx_handler_data: data pointer that is used by rx handler
3129 *
3130 * Register a receive handler for a device. This handler will then be
3131 * called from __netif_receive_skb. A negative errno code is returned
3132 * on a failure.
3133 *
3134 * The caller must hold the rtnl_mutex.
3135 *
3136 * For a general description of rx_handler, see enum rx_handler_result.
3137 */
3138 int netdev_rx_handler_register(struct net_device *dev,
3139 rx_handler_func_t *rx_handler,
3140 void *rx_handler_data)
3141 {
3142 ASSERT_RTNL();
3143
3144 if (dev->rx_handler)
3145 return -EBUSY;
3146
3147 rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3148 rcu_assign_pointer(dev->rx_handler, rx_handler);
3149
3150 return 0;
3151 }
3152 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
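/*
 * Illustrative sketch (assumption, not part of this file): how a bridge-like
 * module would attach an rx_handler to a port device under RTNL. The handler
 * and its private data ("my_port") are hypothetical.
 *
 *	static rx_handler_result_t my_handle_frame(struct sk_buff **pskb)
 *	{
 *		struct my_port *port =
 *			rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *		 ... steal, modify or pass the skb here ...
 *		return RX_HANDLER_PASS;
 *	}
 *
 *	rtnl_lock();
 *	err = netdev_rx_handler_register(port_dev, my_handle_frame, port);
 *	rtnl_unlock();
 */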
3153
3154 /**
3155 * netdev_rx_handler_unregister - unregister receive handler
3156 * @dev: device to unregister a handler from
3157 *
3158 * Unregister a receive handler from a device.
3159 *
3160 * The caller must hold the rtnl_mutex.
3161 */
3162 void netdev_rx_handler_unregister(struct net_device *dev)
3163 {
3164
3165 ASSERT_RTNL();
3166 RCU_INIT_POINTER(dev->rx_handler, NULL);
3167 RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3168 }
3169 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3170
3171 static int __netif_receive_skb(struct sk_buff *skb)
3172 {
3173 struct packet_type *ptype, *pt_prev;
3174 rx_handler_func_t *rx_handler;
3175 struct net_device *orig_dev;
3176 struct net_device *null_or_dev;
3177 bool deliver_exact = false;
3178 int ret = NET_RX_DROP;
3179 __be16 type;
3180
3181 net_timestamp_check(!netdev_tstamp_prequeue, skb);
3182
3183 trace_netif_receive_skb(skb);
3184
3185 /* if we've gotten here through NAPI, check netpoll */
3186 if (netpoll_receive_skb(skb))
3187 return NET_RX_DROP;
3188
3189 if (!skb->skb_iif)
3190 skb->skb_iif = skb->dev->ifindex;
3191 orig_dev = skb->dev;
3192
3193 skb_reset_network_header(skb);
3194 skb_reset_transport_header(skb);
3195 skb_reset_mac_len(skb);
3196
3197 pt_prev = NULL;
3198
3199 rcu_read_lock();
3200
3201 another_round:
3202
3203 __this_cpu_inc(softnet_data.processed);
3204
3205 if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
3206 skb = vlan_untag(skb);
3207 if (unlikely(!skb))
3208 goto out;
3209 }
3210
3211 #ifdef CONFIG_NET_CLS_ACT
3212 if (skb->tc_verd & TC_NCLS) {
3213 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3214 goto ncls;
3215 }
3216 #endif
3217
3218 list_for_each_entry_rcu(ptype, &ptype_all, list) {
3219 if (!ptype->dev || ptype->dev == skb->dev) {
3220 if (pt_prev)
3221 ret = deliver_skb(skb, pt_prev, orig_dev);
3222 pt_prev = ptype;
3223 }
3224 }
3225
3226 #ifdef CONFIG_NET_CLS_ACT
3227 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3228 if (!skb)
3229 goto out;
3230 ncls:
3231 #endif
3232
3233 rx_handler = rcu_dereference(skb->dev->rx_handler);
3234 if (vlan_tx_tag_present(skb)) {
3235 if (pt_prev) {
3236 ret = deliver_skb(skb, pt_prev, orig_dev);
3237 pt_prev = NULL;
3238 }
3239 if (vlan_do_receive(&skb, !rx_handler))
3240 goto another_round;
3241 else if (unlikely(!skb))
3242 goto out;
3243 }
3244
3245 if (rx_handler) {
3246 if (pt_prev) {
3247 ret = deliver_skb(skb, pt_prev, orig_dev);
3248 pt_prev = NULL;
3249 }
3250 switch (rx_handler(&skb)) {
3251 case RX_HANDLER_CONSUMED:
3252 goto out;
3253 case RX_HANDLER_ANOTHER:
3254 goto another_round;
3255 case RX_HANDLER_EXACT:
3256 deliver_exact = true;
3257 case RX_HANDLER_PASS:
3258 break;
3259 default:
3260 BUG();
3261 }
3262 }
3263
3264 /* deliver only exact match when indicated */
3265 null_or_dev = deliver_exact ? skb->dev : NULL;
3266
3267 type = skb->protocol;
3268 list_for_each_entry_rcu(ptype,
3269 &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
3270 if (ptype->type == type &&
3271 (ptype->dev == null_or_dev || ptype->dev == skb->dev ||
3272 ptype->dev == orig_dev)) {
3273 if (pt_prev)
3274 ret = deliver_skb(skb, pt_prev, orig_dev);
3275 pt_prev = ptype;
3276 }
3277 }
3278
3279 if (pt_prev) {
3280 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3281 } else {
3282 atomic_long_inc(&skb->dev->rx_dropped);
3283 kfree_skb(skb);
3284 /* Jamal, now you will not be able to escape explaining
3285 * to me how you were going to use this. :-)
3286 */
3287 ret = NET_RX_DROP;
3288 }
3289
3290 out:
3291 rcu_read_unlock();
3292 return ret;
3293 }
3294
3295 /**
3296 * netif_receive_skb - process receive buffer from network
3297 * @skb: buffer to process
3298 *
3299 * netif_receive_skb() is the main receive data processing function.
3300 * It always succeeds. The buffer may be dropped during processing
3301 * for congestion control or by the protocol layers.
3302 *
3303 * This function may only be called from softirq context and interrupts
3304 * should be enabled.
3305 *
3306 * Return values (usually ignored):
3307 * NET_RX_SUCCESS: no congestion
3308 * NET_RX_DROP: packet was dropped
3309 */
3310 int netif_receive_skb(struct sk_buff *skb)
3311 {
3312 net_timestamp_check(netdev_tstamp_prequeue, skb);
3313
3314 if (skb_defer_rx_timestamp(skb))
3315 return NET_RX_SUCCESS;
3316
3317 #ifdef CONFIG_RPS
3318 if (static_key_false(&rps_needed)) {
3319 struct rps_dev_flow voidflow, *rflow = &voidflow;
3320 int cpu, ret;
3321
3322 rcu_read_lock();
3323
3324 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3325
3326 if (cpu >= 0) {
3327 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3328 rcu_read_unlock();
3329 return ret;
3330 }
3331 rcu_read_unlock();
3332 }
3333 #endif
3334 return __netif_receive_skb(skb);
3335 }
3336 EXPORT_SYMBOL(netif_receive_skb);
3337
3338 /* Network device is going away; flush any packets still pending.
3339 * Called with irqs disabled.
3340 */
3341 static void flush_backlog(void *arg)
3342 {
3343 struct net_device *dev = arg;
3344 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3345 struct sk_buff *skb, *tmp;
3346
3347 rps_lock(sd);
3348 skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3349 if (skb->dev == dev) {
3350 __skb_unlink(skb, &sd->input_pkt_queue);
3351 kfree_skb(skb);
3352 input_queue_head_incr(sd);
3353 }
3354 }
3355 rps_unlock(sd);
3356
3357 skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3358 if (skb->dev == dev) {
3359 __skb_unlink(skb, &sd->process_queue);
3360 kfree_skb(skb);
3361 input_queue_head_incr(sd);
3362 }
3363 }
3364 }
3365
3366 static int napi_gro_complete(struct sk_buff *skb)
3367 {
3368 struct packet_type *ptype;
3369 __be16 type = skb->protocol;
3370 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3371 int err = -ENOENT;
3372
3373 if (NAPI_GRO_CB(skb)->count == 1) {
3374 skb_shinfo(skb)->gso_size = 0;
3375 goto out;
3376 }
3377
3378 rcu_read_lock();
3379 list_for_each_entry_rcu(ptype, head, list) {
3380 if (ptype->type != type || ptype->dev || !ptype->gro_complete)
3381 continue;
3382
3383 err = ptype->gro_complete(skb);
3384 break;
3385 }
3386 rcu_read_unlock();
3387
3388 if (err) {
3389 WARN_ON(&ptype->list == head);
3390 kfree_skb(skb);
3391 return NET_RX_SUCCESS;
3392 }
3393
3394 out:
3395 return netif_receive_skb(skb);
3396 }
3397
3398 inline void napi_gro_flush(struct napi_struct *napi)
3399 {
3400 struct sk_buff *skb, *next;
3401
3402 for (skb = napi->gro_list; skb; skb = next) {
3403 next = skb->next;
3404 skb->next = NULL;
3405 napi_gro_complete(skb);
3406 }
3407
3408 napi->gro_count = 0;
3409 napi->gro_list = NULL;
3410 }
3411 EXPORT_SYMBOL(napi_gro_flush);
3412
3413 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3414 {
3415 struct sk_buff **pp = NULL;
3416 struct packet_type *ptype;
3417 __be16 type = skb->protocol;
3418 struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
3419 int same_flow;
3420 int mac_len;
3421 enum gro_result ret;
3422
3423 if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
3424 goto normal;
3425
3426 if (skb_is_gso(skb) || skb_has_frag_list(skb))
3427 goto normal;
3428
3429 rcu_read_lock();
3430 list_for_each_entry_rcu(ptype, head, list) {
3431 if (ptype->type != type || ptype->dev || !ptype->gro_receive)
3432 continue;
3433
3434 skb_set_network_header(skb, skb_gro_offset(skb));
3435 mac_len = skb->network_header - skb->mac_header;
3436 skb->mac_len = mac_len;
3437 NAPI_GRO_CB(skb)->same_flow = 0;
3438 NAPI_GRO_CB(skb)->flush = 0;
3439 NAPI_GRO_CB(skb)->free = 0;
3440
3441 pp = ptype->gro_receive(&napi->gro_list, skb);
3442 break;
3443 }
3444 rcu_read_unlock();
3445
3446 if (&ptype->list == head)
3447 goto normal;
3448
3449 same_flow = NAPI_GRO_CB(skb)->same_flow;
3450 ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
3451
3452 if (pp) {
3453 struct sk_buff *nskb = *pp;
3454
3455 *pp = nskb->next;
3456 nskb->next = NULL;
3457 napi_gro_complete(nskb);
3458 napi->gro_count--;
3459 }
3460
3461 if (same_flow)
3462 goto ok;
3463
3464 if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
3465 goto normal;
3466
3467 napi->gro_count++;
3468 NAPI_GRO_CB(skb)->count = 1;
3469 skb_shinfo(skb)->gso_size = skb_gro_len(skb);
3470 skb->next = napi->gro_list;
3471 napi->gro_list = skb;
3472 ret = GRO_HELD;
3473
3474 pull:
3475 if (skb_headlen(skb) < skb_gro_offset(skb)) {
3476 int grow = skb_gro_offset(skb) - skb_headlen(skb);
3477
3478 BUG_ON(skb->end - skb->tail < grow);
3479
3480 memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
3481
3482 skb->tail += grow;
3483 skb->data_len -= grow;
3484
3485 skb_shinfo(skb)->frags[0].page_offset += grow;
3486 skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow);
3487
3488 if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) {
3489 skb_frag_unref(skb, 0);
3490 memmove(skb_shinfo(skb)->frags,
3491 skb_shinfo(skb)->frags + 1,
3492 --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
3493 }
3494 }
3495
3496 ok:
3497 return ret;
3498
3499 normal:
3500 ret = GRO_NORMAL;
3501 goto pull;
3502 }
3503 EXPORT_SYMBOL(dev_gro_receive);
3504
3505 static inline gro_result_t
3506 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3507 {
3508 struct sk_buff *p;
3509 unsigned int maclen = skb->dev->hard_header_len;
3510
3511 for (p = napi->gro_list; p; p = p->next) {
3512 unsigned long diffs;
3513
3514 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3515 diffs |= p->vlan_tci ^ skb->vlan_tci;
3516 if (maclen == ETH_HLEN)
3517 diffs |= compare_ether_header(skb_mac_header(p),
3518 skb_gro_mac_header(skb));
3519 else if (!diffs)
3520 diffs = memcmp(skb_mac_header(p),
3521 skb_gro_mac_header(skb),
3522 maclen);
3523 NAPI_GRO_CB(p)->same_flow = !diffs;
3524 NAPI_GRO_CB(p)->flush = 0;
3525 }
3526
3527 return dev_gro_receive(napi, skb);
3528 }
3529
3530 gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3531 {
3532 switch (ret) {
3533 case GRO_NORMAL:
3534 if (netif_receive_skb(skb))
3535 ret = GRO_DROP;
3536 break;
3537
3538 case GRO_DROP:
3539 kfree_skb(skb);
3540 break;
3541
3542 case GRO_MERGED_FREE:
3543 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3544 kmem_cache_free(skbuff_head_cache, skb);
3545 else
3546 __kfree_skb(skb);
3547 break;
3548
3549 case GRO_HELD:
3550 case GRO_MERGED:
3551 break;
3552 }
3553
3554 return ret;
3555 }
3556 EXPORT_SYMBOL(napi_skb_finish);
3557
3558 void skb_gro_reset_offset(struct sk_buff *skb)
3559 {
3560 NAPI_GRO_CB(skb)->data_offset = 0;
3561 NAPI_GRO_CB(skb)->frag0 = NULL;
3562 NAPI_GRO_CB(skb)->frag0_len = 0;
3563
3564 if (skb->mac_header == skb->tail &&
3565 !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) {
3566 NAPI_GRO_CB(skb)->frag0 =
3567 skb_frag_address(&skb_shinfo(skb)->frags[0]);
3568 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]);
3569 }
3570 }
3571 EXPORT_SYMBOL(skb_gro_reset_offset);
3572
3573 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
3574 {
3575 skb_gro_reset_offset(skb);
3576
3577 return napi_skb_finish(__napi_gro_receive(napi, skb), skb);
3578 }
3579 EXPORT_SYMBOL(napi_gro_receive);
3580
3581 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
3582 {
3583 __skb_pull(skb, skb_headlen(skb));
3584 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
3585 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
3586 skb->vlan_tci = 0;
3587 skb->dev = napi->dev;
3588 skb->skb_iif = 0;
3589
3590 napi->skb = skb;
3591 }
3592
3593 struct sk_buff *napi_get_frags(struct napi_struct *napi)
3594 {
3595 struct sk_buff *skb = napi->skb;
3596
3597 if (!skb) {
3598 skb = netdev_alloc_skb_ip_align(napi->dev, GRO_MAX_HEAD);
3599 if (skb)
3600 napi->skb = skb;
3601 }
3602 return skb;
3603 }
3604 EXPORT_SYMBOL(napi_get_frags);
3605
3606 gro_result_t napi_frags_finish(struct napi_struct *napi, struct sk_buff *skb,
3607 gro_result_t ret)
3608 {
3609 switch (ret) {
3610 case GRO_NORMAL:
3611 case GRO_HELD:
3612 skb->protocol = eth_type_trans(skb, skb->dev);
3613
3614 if (ret == GRO_HELD)
3615 skb_gro_pull(skb, -ETH_HLEN);
3616 else if (netif_receive_skb(skb))
3617 ret = GRO_DROP;
3618 break;
3619
3620 case GRO_DROP:
3621 case GRO_MERGED_FREE:
3622 napi_reuse_skb(napi, skb);
3623 break;
3624
3625 case GRO_MERGED:
3626 break;
3627 }
3628
3629 return ret;
3630 }
3631 EXPORT_SYMBOL(napi_frags_finish);
3632
3633 struct sk_buff *napi_frags_skb(struct napi_struct *napi)
3634 {
3635 struct sk_buff *skb = napi->skb;
3636 struct ethhdr *eth;
3637 unsigned int hlen;
3638 unsigned int off;
3639
3640 napi->skb = NULL;
3641
3642 skb_reset_mac_header(skb);
3643 skb_gro_reset_offset(skb);
3644
3645 off = skb_gro_offset(skb);
3646 hlen = off + sizeof(*eth);
3647 eth = skb_gro_header_fast(skb, off);
3648 if (skb_gro_header_hard(skb, hlen)) {
3649 eth = skb_gro_header_slow(skb, hlen, off);
3650 if (unlikely(!eth)) {
3651 napi_reuse_skb(napi, skb);
3652 skb = NULL;
3653 goto out;
3654 }
3655 }
3656
3657 skb_gro_pull(skb, sizeof(*eth));
3658
3659 /*
3660 * This works because the only protocols we care about don't require
3661 * special handling. We'll fix it up properly at the end.
3662 */
3663 skb->protocol = eth->h_proto;
3664
3665 out:
3666 return skb;
3667 }
3668 EXPORT_SYMBOL(napi_frags_skb);
3669
3670 gro_result_t napi_gro_frags(struct napi_struct *napi)
3671 {
3672 struct sk_buff *skb = napi_frags_skb(napi);
3673
3674 if (!skb)
3675 return GRO_DROP;
3676
3677 return napi_frags_finish(napi, skb, __napi_gro_receive(napi, skb));
3678 }
3679 EXPORT_SYMBOL(napi_gro_frags);
3680
3681 /*
3682 * net_rps_action sends any pending IPIs for RPS.
3683 * Note: called with local irq disabled, but exits with local irq enabled.
3684 */
3685 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
3686 {
3687 #ifdef CONFIG_RPS
3688 struct softnet_data *remsd = sd->rps_ipi_list;
3689
3690 if (remsd) {
3691 sd->rps_ipi_list = NULL;
3692
3693 local_irq_enable();
3694
3695 /* Send pending IPI's to kick RPS processing on remote cpus. */
3696 while (remsd) {
3697 struct softnet_data *next = remsd->rps_ipi_next;
3698
3699 if (cpu_online(remsd->cpu))
3700 __smp_call_function_single(remsd->cpu,
3701 &remsd->csd, 0);
3702 remsd = next;
3703 }
3704 } else
3705 #endif
3706 local_irq_enable();
3707 }
3708
3709 static int process_backlog(struct napi_struct *napi, int quota)
3710 {
3711 int work = 0;
3712 struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
3713
3714 #ifdef CONFIG_RPS
3715 /* Check if we have pending IPIs; it is better to send them now
3716 * than to wait for net_rx_action() to end.
3717 */
3718 if (sd->rps_ipi_list) {
3719 local_irq_disable();
3720 net_rps_action_and_irq_enable(sd);
3721 }
3722 #endif
3723 napi->weight = weight_p;
3724 local_irq_disable();
3725 while (work < quota) {
3726 struct sk_buff *skb;
3727 unsigned int qlen;
3728
3729 while ((skb = __skb_dequeue(&sd->process_queue))) {
3730 local_irq_enable();
3731 __netif_receive_skb(skb);
3732 local_irq_disable();
3733 input_queue_head_incr(sd);
3734 if (++work >= quota) {
3735 local_irq_enable();
3736 return work;
3737 }
3738 }
3739
3740 rps_lock(sd);
3741 qlen = skb_queue_len(&sd->input_pkt_queue);
3742 if (qlen)
3743 skb_queue_splice_tail_init(&sd->input_pkt_queue,
3744 &sd->process_queue);
3745
3746 if (qlen < quota - work) {
3747 /*
3748 * Inline a custom version of __napi_complete().
3749 * Only the current CPU owns and manipulates this napi,
3750 * and NAPI_STATE_SCHED is the only possible flag set on backlog.
3751 * We can use a plain write instead of clear_bit(),
3752 * and we don't need an smp_mb() memory barrier.
3753 */
3754 list_del(&napi->poll_list);
3755 napi->state = 0;
3756
3757 quota = work + qlen;
3758 }
3759 rps_unlock(sd);
3760 }
3761 local_irq_enable();
3762
3763 return work;
3764 }
3765
3766 /**
3767 * __napi_schedule - schedule for receive
3768 * @n: entry to schedule
3769 *
3770 * The entry's receive function will be scheduled to run.
3771 */
3772 void __napi_schedule(struct napi_struct *n)
3773 {
3774 unsigned long flags;
3775
3776 local_irq_save(flags);
3777 ____napi_schedule(&__get_cpu_var(softnet_data), n);
3778 local_irq_restore(flags);
3779 }
3780 EXPORT_SYMBOL(__napi_schedule);
3781
3782 void __napi_complete(struct napi_struct *n)
3783 {
3784 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
3785 BUG_ON(n->gro_list);
3786
3787 list_del(&n->poll_list);
3788 smp_mb__before_clear_bit();
3789 clear_bit(NAPI_STATE_SCHED, &n->state);
3790 }
3791 EXPORT_SYMBOL(__napi_complete);
3792
3793 void napi_complete(struct napi_struct *n)
3794 {
3795 unsigned long flags;
3796
3797 /*
3798 * Don't let napi dequeue from the CPU poll list
3799 * just in case it's running on a different CPU.
3800 */
3801 if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
3802 return;
3803
3804 napi_gro_flush(n);
3805 local_irq_save(flags);
3806 __napi_complete(n);
3807 local_irq_restore(flags);
3808 }
3809 EXPORT_SYMBOL(napi_complete);
3810
3811 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
3812 int (*poll)(struct napi_struct *, int), int weight)
3813 {
3814 INIT_LIST_HEAD(&napi->poll_list);
3815 napi->gro_count = 0;
3816 napi->gro_list = NULL;
3817 napi->skb = NULL;
3818 napi->poll = poll;
3819 napi->weight = weight;
3820 list_add(&napi->dev_list, &dev->napi_list);
3821 napi->dev = dev;
3822 #ifdef CONFIG_NETPOLL
3823 spin_lock_init(&napi->poll_lock);
3824 napi->poll_owner = -1;
3825 #endif
3826 set_bit(NAPI_STATE_SCHED, &napi->state);
3827 }
3828 EXPORT_SYMBOL(netif_napi_add);
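/*
 * Illustrative sketch (hedged, not part of this file): a driver registers its
 * NAPI context at probe time and then drains the RX ring from its poll
 * callback, completing NAPI when the budget is not exhausted. "my_priv",
 * "my_rx_one()" and "my_enable_rx_irq()" are hypothetical.
 *
 *	static int my_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv, napi);
 *		int work = 0;
 *
 *		while (work < budget) {
 *			struct sk_buff *skb = my_rx_one(priv);
 *
 *			if (!skb)
 *				break;
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget) {
 *			napi_complete(napi);
 *			my_enable_rx_irq(priv);	 re-arm RX interrupts
 *		}
 *		return work;
 *	}
 *
 *	 at probe time:
 *	netif_napi_add(netdev, &priv->napi, my_poll, 64);
 */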
3829
3830 void netif_napi_del(struct napi_struct *napi)
3831 {
3832 struct sk_buff *skb, *next;
3833
3834 list_del_init(&napi->dev_list);
3835 napi_free_frags(napi);
3836
3837 for (skb = napi->gro_list; skb; skb = next) {
3838 next = skb->next;
3839 skb->next = NULL;
3840 kfree_skb(skb);
3841 }
3842
3843 napi->gro_list = NULL;
3844 napi->gro_count = 0;
3845 }
3846 EXPORT_SYMBOL(netif_napi_del);
3847
3848 static void net_rx_action(struct softirq_action *h)
3849 {
3850 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3851 unsigned long time_limit = jiffies + 2;
3852 int budget = netdev_budget;
3853 void *have;
3854
3855 local_irq_disable();
3856
3857 while (!list_empty(&sd->poll_list)) {
3858 struct napi_struct *n;
3859 int work, weight;
3860
3861 /* If the softirq window is exhausted then punt.
3862 * Allow this to run for 2 jiffies, which allows
3863 * an average latency of 1.5/HZ.
3864 */
3865 if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
3866 goto softnet_break;
3867
3868 local_irq_enable();
3869
3870 /* Even though interrupts have been re-enabled, this
3871 * access is safe because interrupts can only add new
3872 * entries to the tail of this list, and only ->poll()
3873 * calls can remove this head entry from the list.
3874 */
3875 n = list_first_entry(&sd->poll_list, struct napi_struct, poll_list);
3876
3877 have = netpoll_poll_lock(n);
3878
3879 weight = n->weight;
3880
3881 /* This NAPI_STATE_SCHED test is for avoiding a race
3882 * with netpoll's poll_napi(). Only the entity which
3883 * obtains the lock and sees NAPI_STATE_SCHED set will
3884 * actually make the ->poll() call. Therefore we avoid
3885 * accidentally calling ->poll() when NAPI is not scheduled.
3886 */
3887 work = 0;
3888 if (test_bit(NAPI_STATE_SCHED, &n->state)) {
3889 work = n->poll(n, weight);
3890 trace_napi_poll(n);
3891 }
3892
3893 WARN_ON_ONCE(work > weight);
3894
3895 budget -= work;
3896
3897 local_irq_disable();
3898
3899 /* Drivers must not modify the NAPI state if they
3900 * consume the entire weight. In such cases this code
3901 * still "owns" the NAPI instance and therefore can
3902 * move the instance around on the list at-will.
3903 */
3904 if (unlikely(work == weight)) {
3905 if (unlikely(napi_disable_pending(n))) {
3906 local_irq_enable();
3907 napi_complete(n);
3908 local_irq_disable();
3909 } else
3910 list_move_tail(&n->poll_list, &sd->poll_list);
3911 }
3912
3913 netpoll_poll_unlock(have);
3914 }
3915 out:
3916 net_rps_action_and_irq_enable(sd);
3917
3918 #ifdef CONFIG_NET_DMA
3919 /*
3920 * There may not be any more sk_buffs coming right now, so push
3921 * any pending DMA copies to hardware
3922 */
3923 dma_issue_pending_all();
3924 #endif
3925
3926 return;
3927
3928 softnet_break:
3929 sd->time_squeeze++;
3930 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3931 goto out;
3932 }
3933
3934 static gifconf_func_t *gifconf_list[NPROTO];
3935
3936 /**
3937 * register_gifconf - register a SIOCGIF handler
3938 * @family: Address family
3939 * @gifconf: Function handler
3940 *
3941 * Register protocol dependent address dumping routines. The handler
3942 * that is passed must not be freed or reused until it has been replaced
3943 * by another handler.
3944 */
3945 int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
3946 {
3947 if (family >= NPROTO)
3948 return -EINVAL;
3949 gifconf_list[family] = gifconf;
3950 return 0;
3951 }
3952 EXPORT_SYMBOL(register_gifconf);
3953
3954
3955 /*
3956 * Map an interface index to its name (SIOCGIFNAME)
3957 */
3958
3959 /*
3960 * We need this ioctl for efficient implementation of the
3961 * if_indextoname() function required by the IPv6 API. Without
3962 * it, we would have to search all the interfaces to find a
3963 * match. --pb
3964 */
3965
3966 static int dev_ifname(struct net *net, struct ifreq __user *arg)
3967 {
3968 struct net_device *dev;
3969 struct ifreq ifr;
3970
3971 /*
3972 * Fetch the caller's info block.
3973 */
3974
3975 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
3976 return -EFAULT;
3977
3978 rcu_read_lock();
3979 dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
3980 if (!dev) {
3981 rcu_read_unlock();
3982 return -ENODEV;
3983 }
3984
3985 strcpy(ifr.ifr_name, dev->name);
3986 rcu_read_unlock();
3987
3988 if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
3989 return -EFAULT;
3990 return 0;
3991 }
3992
3993 /*
3994 * Perform a SIOCGIFCONF call. This structure will change
3995 * size eventually, and there is nothing I can do about it.
3996 * Thus we will need a 'compatibility mode'.
3997 */
3998
3999 static int dev_ifconf(struct net *net, char __user *arg)
4000 {
4001 struct ifconf ifc;
4002 struct net_device *dev;
4003 char __user *pos;
4004 int len;
4005 int total;
4006 int i;
4007
4008 /*
4009 * Fetch the caller's info block.
4010 */
4011
4012 if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
4013 return -EFAULT;
4014
4015 pos = ifc.ifc_buf;
4016 len = ifc.ifc_len;
4017
4018 /*
4019 * Loop over the interfaces, and write an info block for each.
4020 */
4021
4022 total = 0;
4023 for_each_netdev(net, dev) {
4024 for (i = 0; i < NPROTO; i++) {
4025 if (gifconf_list[i]) {
4026 int done;
4027 if (!pos)
4028 done = gifconf_list[i](dev, NULL, 0);
4029 else
4030 done = gifconf_list[i](dev, pos + total,
4031 len - total);
4032 if (done < 0)
4033 return -EFAULT;
4034 total += done;
4035 }
4036 }
4037 }
4038
4039 /*
4040 * All done. Write the updated control block back to the caller.
4041 */
4042 ifc.ifc_len = total;
4043
4044 /*
4045 * Both BSD and Solaris return 0 here, so we do too.
4046 */
4047 return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
4048 }
4049
4050 #ifdef CONFIG_PROC_FS
4051
4052 #define BUCKET_SPACE (32 - NETDEV_HASHBITS - 1)
4053
4054 #define get_bucket(x) ((x) >> BUCKET_SPACE)
4055 #define get_offset(x) ((x) & ((1 << BUCKET_SPACE) - 1))
4056 #define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
4057
4058 static inline struct net_device *dev_from_same_bucket(struct seq_file *seq, loff_t *pos)
4059 {
4060 struct net *net = seq_file_net(seq);
4061 struct net_device *dev;
4062 struct hlist_node *p;
4063 struct hlist_head *h;
4064 unsigned int count = 0, offset = get_offset(*pos);
4065
4066 h = &net->dev_name_head[get_bucket(*pos)];
4067 hlist_for_each_entry_rcu(dev, p, h, name_hlist) {
4068 if (++count == offset)
4069 return dev;
4070 }
4071
4072 return NULL;
4073 }
4074
4075 static inline struct net_device *dev_from_bucket(struct seq_file *seq, loff_t *pos)
4076 {
4077 struct net_device *dev;
4078 unsigned int bucket;
4079
4080 do {
4081 dev = dev_from_same_bucket(seq, pos);
4082 if (dev)
4083 return dev;
4084
4085 bucket = get_bucket(*pos) + 1;
4086 *pos = set_bucket_offset(bucket, 1);
4087 } while (bucket < NETDEV_HASHENTRIES);
4088
4089 return NULL;
4090 }
4091
4092 /*
4093 * This is invoked by the /proc filesystem handler to display a device
4094 * in detail.
4095 */
4096 void *dev_seq_start(struct seq_file *seq, loff_t *pos)
4097 __acquires(RCU)
4098 {
4099 rcu_read_lock();
4100 if (!*pos)
4101 return SEQ_START_TOKEN;
4102
4103 if (get_bucket(*pos) >= NETDEV_HASHENTRIES)
4104 return NULL;
4105
4106 return dev_from_bucket(seq, pos);
4107 }
4108
4109 void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4110 {
4111 ++*pos;
4112 return dev_from_bucket(seq, pos);
4113 }
4114
4115 void dev_seq_stop(struct seq_file *seq, void *v)
4116 __releases(RCU)
4117 {
4118 rcu_read_unlock();
4119 }
4120
4121 static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
4122 {
4123 struct rtnl_link_stats64 temp;
4124 const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);
4125
4126 seq_printf(seq, "%6s: %7llu %7llu %4llu %4llu %4llu %5llu %10llu %9llu "
4127 "%8llu %7llu %4llu %4llu %4llu %5llu %7llu %10llu\n",
4128 dev->name, stats->rx_bytes, stats->rx_packets,
4129 stats->rx_errors,
4130 stats->rx_dropped + stats->rx_missed_errors,
4131 stats->rx_fifo_errors,
4132 stats->rx_length_errors + stats->rx_over_errors +
4133 stats->rx_crc_errors + stats->rx_frame_errors,
4134 stats->rx_compressed, stats->multicast,
4135 stats->tx_bytes, stats->tx_packets,
4136 stats->tx_errors, stats->tx_dropped,
4137 stats->tx_fifo_errors, stats->collisions,
4138 stats->tx_carrier_errors +
4139 stats->tx_aborted_errors +
4140 stats->tx_window_errors +
4141 stats->tx_heartbeat_errors,
4142 stats->tx_compressed);
4143 }
4144
4145 /*
4146 * Called from the PROCfs module. This now uses the new arbitrary sized
4147 * /proc/net interface to create /proc/net/dev
4148 */
4149 static int dev_seq_show(struct seq_file *seq, void *v)
4150 {
4151 if (v == SEQ_START_TOKEN)
4152 seq_puts(seq, "Inter-| Receive "
4153 " | Transmit\n"
4154 " face |bytes packets errs drop fifo frame "
4155 "compressed multicast|bytes packets errs "
4156 "drop fifo colls carrier compressed\n");
4157 else
4158 dev_seq_printf_stats(seq, v);
4159 return 0;
4160 }
4161
4162 static struct softnet_data *softnet_get_online(loff_t *pos)
4163 {
4164 struct softnet_data *sd = NULL;
4165
4166 while (*pos < nr_cpu_ids)
4167 if (cpu_online(*pos)) {
4168 sd = &per_cpu(softnet_data, *pos);
4169 break;
4170 } else
4171 ++*pos;
4172 return sd;
4173 }
4174
4175 static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
4176 {
4177 return softnet_get_online(pos);
4178 }
4179
4180 static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4181 {
4182 ++*pos;
4183 return softnet_get_online(pos);
4184 }
4185
4186 static void softnet_seq_stop(struct seq_file *seq, void *v)
4187 {
4188 }
4189
4190 static int softnet_seq_show(struct seq_file *seq, void *v)
4191 {
4192 struct softnet_data *sd = v;
4193
4194 seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
4195 sd->processed, sd->dropped, sd->time_squeeze, 0,
4196 0, 0, 0, 0, /* was fastroute */
4197 sd->cpu_collision, sd->received_rps);
4198 return 0;
4199 }
4200
4201 static const struct seq_operations dev_seq_ops = {
4202 .start = dev_seq_start,
4203 .next = dev_seq_next,
4204 .stop = dev_seq_stop,
4205 .show = dev_seq_show,
4206 };
4207
4208 static int dev_seq_open(struct inode *inode, struct file *file)
4209 {
4210 return seq_open_net(inode, file, &dev_seq_ops,
4211 sizeof(struct seq_net_private));
4212 }
4213
4214 static const struct file_operations dev_seq_fops = {
4215 .owner = THIS_MODULE,
4216 .open = dev_seq_open,
4217 .read = seq_read,
4218 .llseek = seq_lseek,
4219 .release = seq_release_net,
4220 };
4221
4222 static const struct seq_operations softnet_seq_ops = {
4223 .start = softnet_seq_start,
4224 .next = softnet_seq_next,
4225 .stop = softnet_seq_stop,
4226 .show = softnet_seq_show,
4227 };
4228
4229 static int softnet_seq_open(struct inode *inode, struct file *file)
4230 {
4231 return seq_open(file, &softnet_seq_ops);
4232 }
4233
4234 static const struct file_operations softnet_seq_fops = {
4235 .owner = THIS_MODULE,
4236 .open = softnet_seq_open,
4237 .read = seq_read,
4238 .llseek = seq_lseek,
4239 .release = seq_release,
4240 };
4241
4242 static void *ptype_get_idx(loff_t pos)
4243 {
4244 struct packet_type *pt = NULL;
4245 loff_t i = 0;
4246 int t;
4247
4248 list_for_each_entry_rcu(pt, &ptype_all, list) {
4249 if (i == pos)
4250 return pt;
4251 ++i;
4252 }
4253
4254 for (t = 0; t < PTYPE_HASH_SIZE; t++) {
4255 list_for_each_entry_rcu(pt, &ptype_base[t], list) {
4256 if (i == pos)
4257 return pt;
4258 ++i;
4259 }
4260 }
4261 return NULL;
4262 }
4263
4264 static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
4265 __acquires(RCU)
4266 {
4267 rcu_read_lock();
4268 return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
4269 }
4270
4271 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4272 {
4273 struct packet_type *pt;
4274 struct list_head *nxt;
4275 int hash;
4276
4277 ++*pos;
4278 if (v == SEQ_START_TOKEN)
4279 return ptype_get_idx(0);
4280
4281 pt = v;
4282 nxt = pt->list.next;
4283 if (pt->type == htons(ETH_P_ALL)) {
4284 if (nxt != &ptype_all)
4285 goto found;
4286 hash = 0;
4287 nxt = ptype_base[0].next;
4288 } else
4289 hash = ntohs(pt->type) & PTYPE_HASH_MASK;
4290
4291 while (nxt == &ptype_base[hash]) {
4292 if (++hash >= PTYPE_HASH_SIZE)
4293 return NULL;
4294 nxt = ptype_base[hash].next;
4295 }
4296 found:
4297 return list_entry(nxt, struct packet_type, list);
4298 }
4299
4300 static void ptype_seq_stop(struct seq_file *seq, void *v)
4301 __releases(RCU)
4302 {
4303 rcu_read_unlock();
4304 }
4305
4306 static int ptype_seq_show(struct seq_file *seq, void *v)
4307 {
4308 struct packet_type *pt = v;
4309
4310 if (v == SEQ_START_TOKEN)
4311 seq_puts(seq, "Type Device Function\n");
4312 else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
4313 if (pt->type == htons(ETH_P_ALL))
4314 seq_puts(seq, "ALL ");
4315 else
4316 seq_printf(seq, "%04x", ntohs(pt->type));
4317
4318 seq_printf(seq, " %-8s %pF\n",
4319 pt->dev ? pt->dev->name : "", pt->func);
4320 }
4321
4322 return 0;
4323 }
4324
4325 static const struct seq_operations ptype_seq_ops = {
4326 .start = ptype_seq_start,
4327 .next = ptype_seq_next,
4328 .stop = ptype_seq_stop,
4329 .show = ptype_seq_show,
4330 };
4331
4332 static int ptype_seq_open(struct inode *inode, struct file *file)
4333 {
4334 return seq_open_net(inode, file, &ptype_seq_ops,
4335 sizeof(struct seq_net_private));
4336 }
4337
4338 static const struct file_operations ptype_seq_fops = {
4339 .owner = THIS_MODULE,
4340 .open = ptype_seq_open,
4341 .read = seq_read,
4342 .llseek = seq_lseek,
4343 .release = seq_release_net,
4344 };
4345
4346
4347 static int __net_init dev_proc_net_init(struct net *net)
4348 {
4349 int rc = -ENOMEM;
4350
4351 if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
4352 goto out;
4353 if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
4354 goto out_dev;
4355 if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
4356 goto out_softnet;
4357
4358 if (wext_proc_init(net))
4359 goto out_ptype;
4360 rc = 0;
4361 out:
4362 return rc;
4363 out_ptype:
4364 proc_net_remove(net, "ptype");
4365 out_softnet:
4366 proc_net_remove(net, "softnet_stat");
4367 out_dev:
4368 proc_net_remove(net, "dev");
4369 goto out;
4370 }
4371
4372 static void __net_exit dev_proc_net_exit(struct net *net)
4373 {
4374 wext_proc_exit(net);
4375
4376 proc_net_remove(net, "ptype");
4377 proc_net_remove(net, "softnet_stat");
4378 proc_net_remove(net, "dev");
4379 }
4380
4381 static struct pernet_operations __net_initdata dev_proc_ops = {
4382 .init = dev_proc_net_init,
4383 .exit = dev_proc_net_exit,
4384 };
4385
4386 static int __init dev_proc_init(void)
4387 {
4388 return register_pernet_subsys(&dev_proc_ops);
4389 }
4390 #else
4391 #define dev_proc_init() 0
4392 #endif /* CONFIG_PROC_FS */
4393
4394
4395 /**
4396 * netdev_set_master - set up master pointer
4397 * @slave: slave device
4398 * @master: new master device
4399 *
4400 * Changes the master device of the slave. Pass %NULL to break the
4401 * bonding. The caller must hold the RTNL semaphore. On a failure
4402 * a negative errno code is returned. On success the reference counts
4403 * are adjusted and the function returns zero.
4404 */
4405 int netdev_set_master(struct net_device *slave, struct net_device *master)
4406 {
4407 struct net_device *old = slave->master;
4408
4409 ASSERT_RTNL();
4410
4411 if (master) {
4412 if (old)
4413 return -EBUSY;
4414 dev_hold(master);
4415 }
4416
4417 slave->master = master;
4418
4419 if (old)
4420 dev_put(old);
4421 return 0;
4422 }
4423 EXPORT_SYMBOL(netdev_set_master);
4424
4425 /**
4426 * netdev_set_bond_master - set up bonding master/slave pair
4427 * @slave: slave device
4428 * @master: new master device
4429 *
4430 * Changes the master device of the slave. Pass %NULL to break the
4431 * bonding. The caller must hold the RTNL semaphore. On a failure
4432 * a negative errno code is returned. On success %RTM_NEWLINK is sent
4433 * to the routing socket and the function returns zero.
4434 */
4435 int netdev_set_bond_master(struct net_device *slave, struct net_device *master)
4436 {
4437 int err;
4438
4439 ASSERT_RTNL();
4440
4441 err = netdev_set_master(slave, master);
4442 if (err)
4443 return err;
4444 if (master)
4445 slave->flags |= IFF_SLAVE;
4446 else
4447 slave->flags &= ~IFF_SLAVE;
4448
4449 rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
4450 return 0;
4451 }
4452 EXPORT_SYMBOL(netdev_set_bond_master);
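
/*
 * Illustrative sketch (not part of this file): how a bonding-style driver
 * might use netdev_set_bond_master() while already holding the RTNL, as
 * required above. example_enslave()/example_release() are hypothetical.
 */
#if 0
static int example_enslave(struct net_device *master, struct net_device *slave)
{
	ASSERT_RTNL();
	/* sets slave->master, grabs a reference on @master, sets IFF_SLAVE */
	return netdev_set_bond_master(slave, master);
}

static void example_release(struct net_device *slave)
{
	ASSERT_RTNL();
	/* break the bond: drops the master reference and clears IFF_SLAVE */
	netdev_set_bond_master(slave, NULL);
}
#endif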
4453
4454 static void dev_change_rx_flags(struct net_device *dev, int flags)
4455 {
4456 const struct net_device_ops *ops = dev->netdev_ops;
4457
4458 if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
4459 ops->ndo_change_rx_flags(dev, flags);
4460 }
4461
4462 static int __dev_set_promiscuity(struct net_device *dev, int inc)
4463 {
4464 unsigned int old_flags = dev->flags;
4465 uid_t uid;
4466 gid_t gid;
4467
4468 ASSERT_RTNL();
4469
4470 dev->flags |= IFF_PROMISC;
4471 dev->promiscuity += inc;
4472 if (dev->promiscuity == 0) {
4473 /*
4474 * Avoid overflow.
4475 * If inc causes overflow, untouch promisc and return error.
4476 */
4477 if (inc < 0)
4478 dev->flags &= ~IFF_PROMISC;
4479 else {
4480 dev->promiscuity -= inc;
4481 pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
4482 dev->name);
4483 return -EOVERFLOW;
4484 }
4485 }
4486 if (dev->flags != old_flags) {
4487 pr_info("device %s %s promiscuous mode\n",
4488 dev->name,
4489 dev->flags & IFF_PROMISC ? "entered" : "left");
4490 if (audit_enabled) {
4491 current_uid_gid(&uid, &gid);
4492 audit_log(current->audit_context, GFP_ATOMIC,
4493 AUDIT_ANOM_PROMISCUOUS,
4494 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
4495 dev->name, (dev->flags & IFF_PROMISC),
4496 (old_flags & IFF_PROMISC),
4497 audit_get_loginuid(current),
4498 uid, gid,
4499 audit_get_sessionid(current));
4500 }
4501
4502 dev_change_rx_flags(dev, IFF_PROMISC);
4503 }
4504 return 0;
4505 }
4506
4507 /**
4508 * dev_set_promiscuity - update promiscuity count on a device
4509 * @dev: device
4510 * @inc: modifier
4511 *
4512 * Add or remove promiscuity from a device. While the count in the device
4513 * remains above zero the interface remains promiscuous. Once it hits zero
4514 * the device reverts back to normal filtering operation. A negative inc
4515 * value is used to drop promiscuity on the device.
4516 * Return 0 if successful or a negative errno code on error.
4517 */
4518 int dev_set_promiscuity(struct net_device *dev, int inc)
4519 {
4520 unsigned int old_flags = dev->flags;
4521 int err;
4522
4523 err = __dev_set_promiscuity(dev, inc);
4524 if (err < 0)
4525 return err;
4526 if (dev->flags != old_flags)
4527 dev_set_rx_mode(dev);
4528 return err;
4529 }
4530 EXPORT_SYMBOL(dev_set_promiscuity);
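
/*
 * Illustrative sketch (not part of this file): a feature that needs to see
 * every frame takes one promiscuity reference while active and drops it
 * again when done, always under the RTNL. The function names are invented.
 */
#if 0
static int example_start_capture(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_promiscuity(dev, 1);	/* +1 reference */
	rtnl_unlock();
	return err;
}

static void example_stop_capture(struct net_device *dev)
{
	rtnl_lock();
	dev_set_promiscuity(dev, -1);		/* drop our reference */
	rtnl_unlock();
}
#endif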
4531
4532 /**
4533 * dev_set_allmulti - update allmulti count on a device
4534 * @dev: device
4535 * @inc: modifier
4536 *
4537 * Add or remove reception of all multicast frames to a device. While the
4538 * count in the device remains above zero the interface remains listening
4539 * to all multicast frames. Once it hits zero the device reverts back to normal
4540 * filtering operation. A negative @inc value is used to drop the counter
4541 * when releasing a resource needing all multicasts.
4542 * Return 0 if successful or a negative errno code on error.
4543 */
4544
4545 int dev_set_allmulti(struct net_device *dev, int inc)
4546 {
4547 unsigned int old_flags = dev->flags;
4548
4549 ASSERT_RTNL();
4550
4551 dev->flags |= IFF_ALLMULTI;
4552 dev->allmulti += inc;
4553 if (dev->allmulti == 0) {
4554 /*
4555 * Avoid overflow.
4556 * If inc causes overflow, untouch allmulti and return error.
4557 */
4558 if (inc < 0)
4559 dev->flags &= ~IFF_ALLMULTI;
4560 else {
4561 dev->allmulti -= inc;
4562 pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
4563 dev->name);
4564 return -EOVERFLOW;
4565 }
4566 }
4567 if (dev->flags ^ old_flags) {
4568 dev_change_rx_flags(dev, IFF_ALLMULTI);
4569 dev_set_rx_mode(dev);
4570 }
4571 return 0;
4572 }
4573 EXPORT_SYMBOL(dev_set_allmulti);
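
/*
 * Illustrative sketch (not part of this file): a multicast-routing style
 * user takes one allmulti reference for as long as it needs to receive all
 * multicast frames on @dev. Hypothetical names, RTNL held as required.
 */
#if 0
static int example_mroute_attach(struct net_device *dev)
{
	int err;

	rtnl_lock();
	err = dev_set_allmulti(dev, 1);		/* +1 allmulti reference */
	rtnl_unlock();
	return err;
}

static void example_mroute_detach(struct net_device *dev)
{
	rtnl_lock();
	dev_set_allmulti(dev, -1);		/* balance the earlier +1 */
	rtnl_unlock();
}
#endif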
4574
4575 /*
4576 * Upload unicast and multicast address lists to device and
4577 * configure RX filtering. When the device doesn't support unicast
4578 * filtering it is put in promiscuous mode while unicast addresses
4579 * are present.
4580 */
4581 void __dev_set_rx_mode(struct net_device *dev)
4582 {
4583 const struct net_device_ops *ops = dev->netdev_ops;
4584
4585 /* dev_open will call this function so the list will stay sane. */
4586 if (!(dev->flags&IFF_UP))
4587 return;
4588
4589 if (!netif_device_present(dev))
4590 return;
4591
4592 if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
4593 /* Unicast address changes may only happen under the rtnl,
4594 * therefore calling __dev_set_promiscuity here is safe.
4595 */
4596 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
4597 __dev_set_promiscuity(dev, 1);
4598 dev->uc_promisc = true;
4599 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
4600 __dev_set_promiscuity(dev, -1);
4601 dev->uc_promisc = false;
4602 }
4603 }
4604
4605 if (ops->ndo_set_rx_mode)
4606 ops->ndo_set_rx_mode(dev);
4607 }
4608
4609 void dev_set_rx_mode(struct net_device *dev)
4610 {
4611 netif_addr_lock_bh(dev);
4612 __dev_set_rx_mode(dev);
4613 netif_addr_unlock_bh(dev);
4614 }
4615
4616 /**
4617 * dev_get_flags - get flags reported to userspace
4618 * @dev: device
4619 *
4620 * Get the combination of flag bits exported through APIs to userspace.
4621 */
4622 unsigned int dev_get_flags(const struct net_device *dev)
4623 {
4624 unsigned int flags;
4625
4626 flags = (dev->flags & ~(IFF_PROMISC |
4627 IFF_ALLMULTI |
4628 IFF_RUNNING |
4629 IFF_LOWER_UP |
4630 IFF_DORMANT)) |
4631 (dev->gflags & (IFF_PROMISC |
4632 IFF_ALLMULTI));
4633
4634 if (netif_running(dev)) {
4635 if (netif_oper_up(dev))
4636 flags |= IFF_RUNNING;
4637 if (netif_carrier_ok(dev))
4638 flags |= IFF_LOWER_UP;
4639 if (netif_dormant(dev))
4640 flags |= IFF_DORMANT;
4641 }
4642
4643 return flags;
4644 }
4645 EXPORT_SYMBOL(dev_get_flags);
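
/*
 * Illustrative sketch (not part of this file): checking the userspace view
 * of the link with dev_get_flags() rather than peeking at dev->flags, so
 * IFF_RUNNING/IFF_LOWER_UP reflect operstate and carrier as computed above.
 * The helper name is invented.
 */
#if 0
static bool example_link_usable(const struct net_device *dev)
{
	unsigned int flags = dev_get_flags(dev);

	return (flags & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING);
}
#endif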
4646
4647 int __dev_change_flags(struct net_device *dev, unsigned int flags)
4648 {
4649 unsigned int old_flags = dev->flags;
4650 int ret;
4651
4652 ASSERT_RTNL();
4653
4654 /*
4655 * Set the flags on our device.
4656 */
4657
4658 dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
4659 IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
4660 IFF_AUTOMEDIA)) |
4661 (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
4662 IFF_ALLMULTI));
4663
4664 /*
4665 * Load in the correct multicast list now the flags have changed.
4666 */
4667
4668 if ((old_flags ^ flags) & IFF_MULTICAST)
4669 dev_change_rx_flags(dev, IFF_MULTICAST);
4670
4671 dev_set_rx_mode(dev);
4672
4673 /*
4674 * Have we downed the interface? We handle IFF_UP ourselves
4675 * according to user attempts to set it, rather than blindly
4676 * setting it.
4677 */
4678
4679 ret = 0;
4680 if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
4681 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
4682
4683 if (!ret)
4684 dev_set_rx_mode(dev);
4685 }
4686
4687 if ((flags ^ dev->gflags) & IFF_PROMISC) {
4688 int inc = (flags & IFF_PROMISC) ? 1 : -1;
4689
4690 dev->gflags ^= IFF_PROMISC;
4691 dev_set_promiscuity(dev, inc);
4692 }
4693
4694 /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
4695 is important. Some (broken) drivers set IFF_PROMISC when
4696 IFF_ALLMULTI is requested, without asking us and without reporting it.
4697 */
4698 if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
4699 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
4700
4701 dev->gflags ^= IFF_ALLMULTI;
4702 dev_set_allmulti(dev, inc);
4703 }
4704
4705 return ret;
4706 }
4707
4708 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags)
4709 {
4710 unsigned int changes = dev->flags ^ old_flags;
4711
4712 if (changes & IFF_UP) {
4713 if (dev->flags & IFF_UP)
4714 call_netdevice_notifiers(NETDEV_UP, dev);
4715 else
4716 call_netdevice_notifiers(NETDEV_DOWN, dev);
4717 }
4718
4719 if (dev->flags & IFF_UP &&
4720 (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE)))
4721 call_netdevice_notifiers(NETDEV_CHANGE, dev);
4722 }
4723
4724 /**
4725 * dev_change_flags - change device settings
4726 * @dev: device
4727 * @flags: device state flags
4728 *
4729 * Change settings on device based state flags. The flags are
4730 * in the userspace exported format.
4731 */
4732 int dev_change_flags(struct net_device *dev, unsigned int flags)
4733 {
4734 int ret;
4735 unsigned int changes, old_flags = dev->flags;
4736
4737 ret = __dev_change_flags(dev, flags);
4738 if (ret < 0)
4739 return ret;
4740
4741 changes = old_flags ^ dev->flags;
4742 if (changes)
4743 rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
4744
4745 __dev_notify_flags(dev, old_flags);
4746 return ret;
4747 }
4748 EXPORT_SYMBOL(dev_change_flags);
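
/*
 * Illustrative sketch (not part of this file): administratively bringing an
 * interface up from kernel code by setting IFF_UP through dev_change_flags()
 * under the RTNL, so notifiers and rtnetlink messages fire as usual.
 * example_bring_up() is a hypothetical helper.
 */
#if 0
static int example_bring_up(struct net_device *dev)
{
	int err = 0;

	rtnl_lock();
	if (!(dev->flags & IFF_UP))
		err = dev_change_flags(dev, dev->flags | IFF_UP);
	rtnl_unlock();
	return err;
}
#endif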
4749
4750 /**
4751 * dev_set_mtu - Change maximum transfer unit
4752 * @dev: device
4753 * @new_mtu: new transfer unit
4754 *
4755 * Change the maximum transfer size of the network device.
4756 */
4757 int dev_set_mtu(struct net_device *dev, int new_mtu)
4758 {
4759 const struct net_device_ops *ops = dev->netdev_ops;
4760 int err;
4761
4762 if (new_mtu == dev->mtu)
4763 return 0;
4764
4765 /* MTU must be positive. */
4766 if (new_mtu < 0)
4767 return -EINVAL;
4768
4769 if (!netif_device_present(dev))
4770 return -ENODEV;
4771
4772 err = 0;
4773 if (ops->ndo_change_mtu)
4774 err = ops->ndo_change_mtu(dev, new_mtu);
4775 else
4776 dev->mtu = new_mtu;
4777
4778 if (!err && dev->flags & IFF_UP)
4779 call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
4780 return err;
4781 }
4782 EXPORT_SYMBOL(dev_set_mtu);
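
/*
 * Illustrative sketch (not part of this file): a tunnel-like user lowering
 * the MTU of an underlying device from kernel code. dev_set_mtu() is called
 * with the RTNL held, as the ioctl path below does. Names are invented.
 */
#if 0
static int example_shrink_mtu(struct net_device *dev, int new_mtu)
{
	int err;

	rtnl_lock();
	err = dev_set_mtu(dev, new_mtu);	/* NETDEV_CHANGEMTU fires on success */
	rtnl_unlock();
	return err;
}
#endif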
4783
4784 /**
4785 * dev_set_group - Change group this device belongs to
4786 * @dev: device
4787 * @new_group: group this device should belong to
4788 */
4789 void dev_set_group(struct net_device *dev, int new_group)
4790 {
4791 dev->group = new_group;
4792 }
4793 EXPORT_SYMBOL(dev_set_group);
4794
4795 /**
4796 * dev_set_mac_address - Change Media Access Control Address
4797 * @dev: device
4798 * @sa: new address
4799 *
4800 * Change the hardware (MAC) address of the device
4801 */
4802 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
4803 {
4804 const struct net_device_ops *ops = dev->netdev_ops;
4805 int err;
4806
4807 if (!ops->ndo_set_mac_address)
4808 return -EOPNOTSUPP;
4809 if (sa->sa_family != dev->type)
4810 return -EINVAL;
4811 if (!netif_device_present(dev))
4812 return -ENODEV;
4813 err = ops->ndo_set_mac_address(dev, sa);
4814 if (!err)
4815 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4816 return err;
4817 }
4818 EXPORT_SYMBOL(dev_set_mac_address);
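
/*
 * Illustrative sketch (not part of this file): programming a new MAC from
 * kernel code. The address is passed as a struct sockaddr whose sa_family
 * must match dev->type, mirroring the SIOCSIFHWADDR path below. This
 * assumes dev->addr_len fits in sa_data (true for Ethernet).
 */
#if 0
static int example_set_mac(struct net_device *dev, const u8 *mac)
{
	struct sockaddr sa;
	int err;

	sa.sa_family = dev->type;
	memcpy(sa.sa_data, mac, dev->addr_len);

	rtnl_lock();
	err = dev_set_mac_address(dev, &sa);	/* NETDEV_CHANGEADDR on success */
	rtnl_unlock();
	return err;
}
#endif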
4819
4820 /*
4821 * Perform the SIOCxIFxxx calls, inside rcu_read_lock()
4822 */
4823 static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
4824 {
4825 int err;
4826 struct net_device *dev = dev_get_by_name_rcu(net, ifr->ifr_name);
4827
4828 if (!dev)
4829 return -ENODEV;
4830
4831 switch (cmd) {
4832 case SIOCGIFFLAGS: /* Get interface flags */
4833 ifr->ifr_flags = (short) dev_get_flags(dev);
4834 return 0;
4835
4836 case SIOCGIFMETRIC: /* Get the metric on the interface
4837 (currently unused) */
4838 ifr->ifr_metric = 0;
4839 return 0;
4840
4841 case SIOCGIFMTU: /* Get the MTU of a device */
4842 ifr->ifr_mtu = dev->mtu;
4843 return 0;
4844
4845 case SIOCGIFHWADDR:
4846 if (!dev->addr_len)
4847 memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
4848 else
4849 memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
4850 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4851 ifr->ifr_hwaddr.sa_family = dev->type;
4852 return 0;
4853
4854 case SIOCGIFSLAVE:
4855 err = -EINVAL;
4856 break;
4857
4858 case SIOCGIFMAP:
4859 ifr->ifr_map.mem_start = dev->mem_start;
4860 ifr->ifr_map.mem_end = dev->mem_end;
4861 ifr->ifr_map.base_addr = dev->base_addr;
4862 ifr->ifr_map.irq = dev->irq;
4863 ifr->ifr_map.dma = dev->dma;
4864 ifr->ifr_map.port = dev->if_port;
4865 return 0;
4866
4867 case SIOCGIFINDEX:
4868 ifr->ifr_ifindex = dev->ifindex;
4869 return 0;
4870
4871 case SIOCGIFTXQLEN:
4872 ifr->ifr_qlen = dev->tx_queue_len;
4873 return 0;
4874
4875 default:
4876 /* dev_ioctl() should ensure this case
4877 * is never reached
4878 */
4879 WARN_ON(1);
4880 err = -ENOTTY;
4881 break;
4882
4883 }
4884 return err;
4885 }
4886
4887 /*
4888 * Perform the SIOCxIFxxx calls, inside rtnl_lock()
4889 */
4890 static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4891 {
4892 int err;
4893 struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
4894 const struct net_device_ops *ops;
4895
4896 if (!dev)
4897 return -ENODEV;
4898
4899 ops = dev->netdev_ops;
4900
4901 switch (cmd) {
4902 case SIOCSIFFLAGS: /* Set interface flags */
4903 return dev_change_flags(dev, ifr->ifr_flags);
4904
4905 case SIOCSIFMETRIC: /* Set the metric on the interface
4906 (currently unused) */
4907 return -EOPNOTSUPP;
4908
4909 case SIOCSIFMTU: /* Set the MTU of a device */
4910 return dev_set_mtu(dev, ifr->ifr_mtu);
4911
4912 case SIOCSIFHWADDR:
4913 return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
4914
4915 case SIOCSIFHWBROADCAST:
4916 if (ifr->ifr_hwaddr.sa_family != dev->type)
4917 return -EINVAL;
4918 memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
4919 min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
4920 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
4921 return 0;
4922
4923 case SIOCSIFMAP:
4924 if (ops->ndo_set_config) {
4925 if (!netif_device_present(dev))
4926 return -ENODEV;
4927 return ops->ndo_set_config(dev, &ifr->ifr_map);
4928 }
4929 return -EOPNOTSUPP;
4930
4931 case SIOCADDMULTI:
4932 if (!ops->ndo_set_rx_mode ||
4933 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4934 return -EINVAL;
4935 if (!netif_device_present(dev))
4936 return -ENODEV;
4937 return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4938
4939 case SIOCDELMULTI:
4940 if (!ops->ndo_set_rx_mode ||
4941 ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
4942 return -EINVAL;
4943 if (!netif_device_present(dev))
4944 return -ENODEV;
4945 return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4946
4947 case SIOCSIFTXQLEN:
4948 if (ifr->ifr_qlen < 0)
4949 return -EINVAL;
4950 dev->tx_queue_len = ifr->ifr_qlen;
4951 return 0;
4952
4953 case SIOCSIFNAME:
4954 ifr->ifr_newname[IFNAMSIZ-1] = '\0';
4955 return dev_change_name(dev, ifr->ifr_newname);
4956
4957 case SIOCSHWTSTAMP:
4958 err = net_hwtstamp_validate(ifr);
4959 if (err)
4960 return err;
4961 /* fall through */
4962
4963 /*
4964 * Unknown or private ioctl
4965 */
4966 default:
4967 if ((cmd >= SIOCDEVPRIVATE &&
4968 cmd <= SIOCDEVPRIVATE + 15) ||
4969 cmd == SIOCBONDENSLAVE ||
4970 cmd == SIOCBONDRELEASE ||
4971 cmd == SIOCBONDSETHWADDR ||
4972 cmd == SIOCBONDSLAVEINFOQUERY ||
4973 cmd == SIOCBONDINFOQUERY ||
4974 cmd == SIOCBONDCHANGEACTIVE ||
4975 cmd == SIOCGMIIPHY ||
4976 cmd == SIOCGMIIREG ||
4977 cmd == SIOCSMIIREG ||
4978 cmd == SIOCBRADDIF ||
4979 cmd == SIOCBRDELIF ||
4980 cmd == SIOCSHWTSTAMP ||
4981 cmd == SIOCWANDEV) {
4982 err = -EOPNOTSUPP;
4983 if (ops->ndo_do_ioctl) {
4984 if (netif_device_present(dev))
4985 err = ops->ndo_do_ioctl(dev, ifr, cmd);
4986 else
4987 err = -ENODEV;
4988 }
4989 } else
4990 err = -EINVAL;
4991
4992 }
4993 return err;
4994 }
4995
4996 /*
4997 * This function handles all "interface"-type I/O control requests. The actual
4998 * 'doing' part of this is dev_ifsioc above.
4999 */
5000
5001 /**
5002 * dev_ioctl - network device ioctl
5003 * @net: the applicable net namespace
5004 * @cmd: command to issue
5005 * @arg: pointer to a struct ifreq in user space
5006 *
5007 * Issue ioctl functions to devices. This is normally called by the
5008 * user space syscall interfaces but can sometimes be useful for
5009 * other purposes. The return value is the return from the syscall if
5010 * positive or a negative errno code on error.
5011 */
5012
5013 int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
5014 {
5015 struct ifreq ifr;
5016 int ret;
5017 char *colon;
5018
5019 /* One special case: SIOCGIFCONF takes ifconf argument
5020 and requires shared lock, because it sleeps writing
5021 to user space.
5022 */
5023
5024 if (cmd == SIOCGIFCONF) {
5025 rtnl_lock();
5026 ret = dev_ifconf(net, (char __user *) arg);
5027 rtnl_unlock();
5028 return ret;
5029 }
5030 if (cmd == SIOCGIFNAME)
5031 return dev_ifname(net, (struct ifreq __user *)arg);
5032
5033 if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
5034 return -EFAULT;
5035
5036 ifr.ifr_name[IFNAMSIZ-1] = 0;
5037
5038 colon = strchr(ifr.ifr_name, ':');
5039 if (colon)
5040 *colon = 0;
5041
5042 /*
5043 * See which interface the caller is talking about.
5044 */
5045
5046 switch (cmd) {
5047 /*
5048 * These ioctl calls:
5049 * - can be done by all.
5050 * - atomic and do not require locking.
5051 * - return a value
5052 */
5053 case SIOCGIFFLAGS:
5054 case SIOCGIFMETRIC:
5055 case SIOCGIFMTU:
5056 case SIOCGIFHWADDR:
5057 case SIOCGIFSLAVE:
5058 case SIOCGIFMAP:
5059 case SIOCGIFINDEX:
5060 case SIOCGIFTXQLEN:
5061 dev_load(net, ifr.ifr_name);
5062 rcu_read_lock();
5063 ret = dev_ifsioc_locked(net, &ifr, cmd);
5064 rcu_read_unlock();
5065 if (!ret) {
5066 if (colon)
5067 *colon = ':';
5068 if (copy_to_user(arg, &ifr,
5069 sizeof(struct ifreq)))
5070 ret = -EFAULT;
5071 }
5072 return ret;
5073
5074 case SIOCETHTOOL:
5075 dev_load(net, ifr.ifr_name);
5076 rtnl_lock();
5077 ret = dev_ethtool(net, &ifr);
5078 rtnl_unlock();
5079 if (!ret) {
5080 if (colon)
5081 *colon = ':';
5082 if (copy_to_user(arg, &ifr,
5083 sizeof(struct ifreq)))
5084 ret = -EFAULT;
5085 }
5086 return ret;
5087
5088 /*
5089 * These ioctl calls:
5090 * - require superuser power.
5091 * - require strict serialization.
5092 * - return a value
5093 */
5094 case SIOCGMIIPHY:
5095 case SIOCGMIIREG:
5096 case SIOCSIFNAME:
5097 if (!capable(CAP_NET_ADMIN))
5098 return -EPERM;
5099 dev_load(net, ifr.ifr_name);
5100 rtnl_lock();
5101 ret = dev_ifsioc(net, &ifr, cmd);
5102 rtnl_unlock();
5103 if (!ret) {
5104 if (colon)
5105 *colon = ':';
5106 if (copy_to_user(arg, &ifr,
5107 sizeof(struct ifreq)))
5108 ret = -EFAULT;
5109 }
5110 return ret;
5111
5112 /*
5113 * These ioctl calls:
5114 * - require superuser power.
5115 * - require strict serialization.
5116 * - do not return a value
5117 */
5118 case SIOCSIFFLAGS:
5119 case SIOCSIFMETRIC:
5120 case SIOCSIFMTU:
5121 case SIOCSIFMAP:
5122 case SIOCSIFHWADDR:
5123 case SIOCSIFSLAVE:
5124 case SIOCADDMULTI:
5125 case SIOCDELMULTI:
5126 case SIOCSIFHWBROADCAST:
5127 case SIOCSIFTXQLEN:
5128 case SIOCSMIIREG:
5129 case SIOCBONDENSLAVE:
5130 case SIOCBONDRELEASE:
5131 case SIOCBONDSETHWADDR:
5132 case SIOCBONDCHANGEACTIVE:
5133 case SIOCBRADDIF:
5134 case SIOCBRDELIF:
5135 case SIOCSHWTSTAMP:
5136 if (!capable(CAP_NET_ADMIN))
5137 return -EPERM;
5138 /* fall through */
5139 case SIOCBONDSLAVEINFOQUERY:
5140 case SIOCBONDINFOQUERY:
5141 dev_load(net, ifr.ifr_name);
5142 rtnl_lock();
5143 ret = dev_ifsioc(net, &ifr, cmd);
5144 rtnl_unlock();
5145 return ret;
5146
5147 case SIOCGIFMEM:
5148 /* Get the per device memory space. We can add this but
5149 * currently do not support it */
5150 case SIOCSIFMEM:
5151 /* Set the per device memory buffer space.
5152 * Not applicable in our case */
5153 case SIOCSIFLINK:
5154 return -ENOTTY;
5155
5156 /*
5157 * Unknown or private ioctl.
5158 */
5159 default:
5160 if (cmd == SIOCWANDEV ||
5161 (cmd >= SIOCDEVPRIVATE &&
5162 cmd <= SIOCDEVPRIVATE + 15)) {
5163 dev_load(net, ifr.ifr_name);
5164 rtnl_lock();
5165 ret = dev_ifsioc(net, &ifr, cmd);
5166 rtnl_unlock();
5167 if (!ret && copy_to_user(arg, &ifr,
5168 sizeof(struct ifreq)))
5169 ret = -EFAULT;
5170 return ret;
5171 }
5172 /* Take care of Wireless Extensions */
5173 if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
5174 return wext_handle_ioctl(net, &ifr, cmd, arg);
5175 return -ENOTTY;
5176 }
5177 }
5178
5179
5180 /**
5181 * dev_new_index - allocate an ifindex
5182 * @net: the applicable net namespace
5183 *
5184 * Returns a suitable unique value for a new device interface
5185 * number. The caller must hold the rtnl semaphore or the
5186 * dev_base_lock to be sure it remains unique.
5187 */
5188 static int dev_new_index(struct net *net)
5189 {
5190 static int ifindex;
5191 for (;;) {
5192 if (++ifindex <= 0)
5193 ifindex = 1;
5194 if (!__dev_get_by_index(net, ifindex))
5195 return ifindex;
5196 }
5197 }
5198
5199 /* Delayed registration/unregistration */
5200 static LIST_HEAD(net_todo_list);
5201
5202 static void net_set_todo(struct net_device *dev)
5203 {
5204 list_add_tail(&dev->todo_list, &net_todo_list);
5205 }
5206
5207 static void rollback_registered_many(struct list_head *head)
5208 {
5209 struct net_device *dev, *tmp;
5210
5211 BUG_ON(dev_boot_phase);
5212 ASSERT_RTNL();
5213
5214 list_for_each_entry_safe(dev, tmp, head, unreg_list) {
5215 /* Some devices call without registering
5216 * for initialization unwind. Remove those
5217 * devices and proceed with the remaining.
5218 */
5219 if (dev->reg_state == NETREG_UNINITIALIZED) {
5220 pr_debug("unregister_netdevice: device %s/%p never was registered\n",
5221 dev->name, dev);
5222
5223 WARN_ON(1);
5224 list_del(&dev->unreg_list);
5225 continue;
5226 }
5227 dev->dismantle = true;
5228 BUG_ON(dev->reg_state != NETREG_REGISTERED);
5229 }
5230
5231 /* If device is running, close it first. */
5232 dev_close_many(head);
5233
5234 list_for_each_entry(dev, head, unreg_list) {
5235 /* And unlink it from device chain. */
5236 unlist_netdevice(dev);
5237
5238 dev->reg_state = NETREG_UNREGISTERING;
5239 }
5240
5241 synchronize_net();
5242
5243 list_for_each_entry(dev, head, unreg_list) {
5244 /* Shutdown queueing discipline. */
5245 dev_shutdown(dev);
5246
5247
5248 /* Notify protocols, that we are about to destroy
5249 this device. They should clean all the things.
5250 */
5251 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5252
5253 if (!dev->rtnl_link_ops ||
5254 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5255 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
5256
5257 /*
5258 * Flush the unicast and multicast chains
5259 */
5260 dev_uc_flush(dev);
5261 dev_mc_flush(dev);
5262
5263 if (dev->netdev_ops->ndo_uninit)
5264 dev->netdev_ops->ndo_uninit(dev);
5265
5266 /* Notifier chain MUST detach us from master device. */
5267 WARN_ON(dev->master);
5268
5269 /* Remove entries from kobject tree */
5270 netdev_unregister_kobject(dev);
5271 }
5272
5273 /* Process any work delayed until the end of the batch */
5274 dev = list_first_entry(head, struct net_device, unreg_list);
5275 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
5276
5277 synchronize_net();
5278
5279 list_for_each_entry(dev, head, unreg_list)
5280 dev_put(dev);
5281 }
5282
5283 static void rollback_registered(struct net_device *dev)
5284 {
5285 LIST_HEAD(single);
5286
5287 list_add(&dev->unreg_list, &single);
5288 rollback_registered_many(&single);
5289 list_del(&single);
5290 }
5291
5292 static netdev_features_t netdev_fix_features(struct net_device *dev,
5293 netdev_features_t features)
5294 {
5295 /* Fix illegal checksum combinations */
5296 if ((features & NETIF_F_HW_CSUM) &&
5297 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5298 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
5299 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
5300 }
5301
5302 /* Fix illegal SG+CSUM combinations. */
5303 if ((features & NETIF_F_SG) &&
5304 !(features & NETIF_F_ALL_CSUM)) {
5305 netdev_dbg(dev,
5306 "Dropping NETIF_F_SG since no checksum feature.\n");
5307 features &= ~NETIF_F_SG;
5308 }
5309
5310 /* TSO requires that SG is present as well. */
5311 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
5312 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
5313 features &= ~NETIF_F_ALL_TSO;
5314 }
5315
5316 /* TSO ECN requires that TSO is present as well. */
5317 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
5318 features &= ~NETIF_F_TSO_ECN;
5319
5320 /* Software GSO depends on SG. */
5321 if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
5322 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
5323 features &= ~NETIF_F_GSO;
5324 }
5325
5326 /* UFO needs SG and checksumming */
5327 if (features & NETIF_F_UFO) {
5328 /* maybe split UFO into V4 and V6? */
5329 if (!((features & NETIF_F_GEN_CSUM) ||
5330 (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
5331 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
5332 netdev_dbg(dev,
5333 "Dropping NETIF_F_UFO since no checksum offload features.\n");
5334 features &= ~NETIF_F_UFO;
5335 }
5336
5337 if (!(features & NETIF_F_SG)) {
5338 netdev_dbg(dev,
5339 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
5340 features &= ~NETIF_F_UFO;
5341 }
5342 }
5343
5344 return features;
5345 }
5346
5347 int __netdev_update_features(struct net_device *dev)
5348 {
5349 netdev_features_t features;
5350 int err = 0;
5351
5352 ASSERT_RTNL();
5353
5354 features = netdev_get_wanted_features(dev);
5355
5356 if (dev->netdev_ops->ndo_fix_features)
5357 features = dev->netdev_ops->ndo_fix_features(dev, features);
5358
5359 /* driver might be less strict about feature dependencies */
5360 features = netdev_fix_features(dev, features);
5361
5362 if (dev->features == features)
5363 return 0;
5364
5365 netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
5366 &dev->features, &features);
5367
5368 if (dev->netdev_ops->ndo_set_features)
5369 err = dev->netdev_ops->ndo_set_features(dev, features);
5370
5371 if (unlikely(err < 0)) {
5372 netdev_err(dev,
5373 "set_features() failed (%d); wanted %pNF, left %pNF\n",
5374 err, &features, &dev->features);
5375 return -1;
5376 }
5377
5378 if (!err)
5379 dev->features = features;
5380
5381 return 1;
5382 }
5383
5384 /**
5385 * netdev_update_features - recalculate device features
5386 * @dev: the device to check
5387 *
5388 * Recalculate dev->features set and send notifications if it
5389 * has changed. Should be called after driver or hardware dependent
5390 * conditions might have changed that influence the features.
5391 */
5392 void netdev_update_features(struct net_device *dev)
5393 {
5394 if (__netdev_update_features(dev))
5395 netdev_features_change(dev);
5396 }
5397 EXPORT_SYMBOL(netdev_update_features);
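
/*
 * Illustrative sketch (not part of this file): a driver whose
 * ndo_fix_features() consults some private state re-runs the feature
 * negotiation after that state changes. struct example_priv and its
 * hw_csum_ok field are hypothetical.
 */
#if 0
static void example_toggle_hw_csum(struct net_device *dev, bool ok)
{
	struct example_priv *priv = netdev_priv(dev);

	rtnl_lock();
	priv->hw_csum_ok = ok;		/* read back by our ndo_fix_features() */
	netdev_update_features(dev);	/* recompute and notify if changed */
	rtnl_unlock();
}
#endif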
5398
5399 /**
5400 * netdev_change_features - recalculate device features
5401 * @dev: the device to check
5402 *
5403 * Recalculate dev->features set and send notifications even
5404 * if they have not changed. Should be called instead of
5405 * netdev_update_features() if also dev->vlan_features might
5406 * have changed to allow the changes to be propagated to stacked
5407 * VLAN devices.
5408 */
5409 void netdev_change_features(struct net_device *dev)
5410 {
5411 __netdev_update_features(dev);
5412 netdev_features_change(dev);
5413 }
5414 EXPORT_SYMBOL(netdev_change_features);
5415
5416 /**
5417 * netif_stacked_transfer_operstate - transfer operstate
5418 * @rootdev: the root or lower level device to transfer state from
5419 * @dev: the device to transfer operstate to
5420 *
5421 * Transfer operational state from root to device. This is normally
5422 * called when a stacking relationship exists between the root
5423 * device and the device (a leaf device).
5424 */
5425 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
5426 struct net_device *dev)
5427 {
5428 if (rootdev->operstate == IF_OPER_DORMANT)
5429 netif_dormant_on(dev);
5430 else
5431 netif_dormant_off(dev);
5432
5433 if (netif_carrier_ok(rootdev)) {
5434 if (!netif_carrier_ok(dev))
5435 netif_carrier_on(dev);
5436 } else {
5437 if (netif_carrier_ok(dev))
5438 netif_carrier_off(dev);
5439 }
5440 }
5441 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
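
/*
 * Illustrative sketch (not part of this file): a VLAN-like upper device
 * mirroring its lower device's operstate from a netdevice notifier. In
 * this kernel the notifier's @ptr is the net_device itself;
 * example_find_upper() is a hypothetical lookup.
 */
#if 0
static int example_device_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *lower = ptr;
	struct net_device *upper = example_find_upper(lower);

	if (upper && event == NETDEV_CHANGE)
		netif_stacked_transfer_operstate(lower, upper);
	return NOTIFY_DONE;
}
#endif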
5442
5443 #ifdef CONFIG_RPS
5444 static int netif_alloc_rx_queues(struct net_device *dev)
5445 {
5446 unsigned int i, count = dev->num_rx_queues;
5447 struct netdev_rx_queue *rx;
5448
5449 BUG_ON(count < 1);
5450
5451 rx = kcalloc(count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
5452 if (!rx) {
5453 pr_err("netdev: Unable to allocate %u rx queues\n", count);
5454 return -ENOMEM;
5455 }
5456 dev->_rx = rx;
5457
5458 for (i = 0; i < count; i++)
5459 rx[i].dev = dev;
5460 return 0;
5461 }
5462 #endif
5463
5464 static void netdev_init_one_queue(struct net_device *dev,
5465 struct netdev_queue *queue, void *_unused)
5466 {
5467 /* Initialize queue lock */
5468 spin_lock_init(&queue->_xmit_lock);
5469 netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
5470 queue->xmit_lock_owner = -1;
5471 netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
5472 queue->dev = dev;
5473 #ifdef CONFIG_BQL
5474 dql_init(&queue->dql, HZ);
5475 #endif
5476 }
5477
5478 static int netif_alloc_netdev_queues(struct net_device *dev)
5479 {
5480 unsigned int count = dev->num_tx_queues;
5481 struct netdev_queue *tx;
5482
5483 BUG_ON(count < 1);
5484
5485 tx = kcalloc(count, sizeof(struct netdev_queue), GFP_KERNEL);
5486 if (!tx) {
5487 pr_err("netdev: Unable to allocate %u tx queues\n", count);
5488 return -ENOMEM;
5489 }
5490 dev->_tx = tx;
5491
5492 netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
5493 spin_lock_init(&dev->tx_global_lock);
5494
5495 return 0;
5496 }
5497
5498 /**
5499 * register_netdevice - register a network device
5500 * @dev: device to register
5501 *
5502 * Take a completed network device structure and add it to the kernel
5503 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5504 * chain. 0 is returned on success. A negative errno code is returned
5505 * on a failure to set up the device, or if the name is a duplicate.
5506 *
5507 * Callers must hold the rtnl semaphore. You may want
5508 * register_netdev() instead of this.
5509 *
5510 * BUGS:
5511 * The locking appears insufficient to guarantee two parallel registers
5512 * will not get the same name.
5513 */
5514
5515 int register_netdevice(struct net_device *dev)
5516 {
5517 int ret;
5518 struct net *net = dev_net(dev);
5519
5520 BUG_ON(dev_boot_phase);
5521 ASSERT_RTNL();
5522
5523 might_sleep();
5524
5525 /* When net_device's are persistent, this will be fatal. */
5526 BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
5527 BUG_ON(!net);
5528
5529 spin_lock_init(&dev->addr_list_lock);
5530 netdev_set_addr_lockdep_class(dev);
5531
5532 dev->iflink = -1;
5533
5534 ret = dev_get_valid_name(dev, dev->name);
5535 if (ret < 0)
5536 goto out;
5537
5538 /* Init, if this function is available */
5539 if (dev->netdev_ops->ndo_init) {
5540 ret = dev->netdev_ops->ndo_init(dev);
5541 if (ret) {
5542 if (ret > 0)
5543 ret = -EIO;
5544 goto out;
5545 }
5546 }
5547
5548 dev->ifindex = dev_new_index(net);
5549 if (dev->iflink == -1)
5550 dev->iflink = dev->ifindex;
5551
5552 /* Transfer changeable features to wanted_features and enable
5553 * software offloads (GSO and GRO).
5554 */
5555 dev->hw_features |= NETIF_F_SOFT_FEATURES;
5556 dev->features |= NETIF_F_SOFT_FEATURES;
5557 dev->wanted_features = dev->features & dev->hw_features;
5558
5559 /* Turn on no cache copy if HW is doing checksum */
5560 if (!(dev->flags & IFF_LOOPBACK)) {
5561 dev->hw_features |= NETIF_F_NOCACHE_COPY;
5562 if (dev->features & NETIF_F_ALL_CSUM) {
5563 dev->wanted_features |= NETIF_F_NOCACHE_COPY;
5564 dev->features |= NETIF_F_NOCACHE_COPY;
5565 }
5566 }
5567
5568 /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
5569 */
5570 dev->vlan_features |= NETIF_F_HIGHDMA;
5571
5572 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5573 ret = notifier_to_errno(ret);
5574 if (ret)
5575 goto err_uninit;
5576
5577 ret = netdev_register_kobject(dev);
5578 if (ret)
5579 goto err_uninit;
5580 dev->reg_state = NETREG_REGISTERED;
5581
5582 __netdev_update_features(dev);
5583
5584 /*
5585 * Default initial state at registration is that the
5586 * device is present.
5587 */
5588
5589 set_bit(__LINK_STATE_PRESENT, &dev->state);
5590
5591 dev_init_scheduler(dev);
5592 dev_hold(dev);
5593 list_netdevice(dev);
5594
5595 /* Notify protocols, that a new device appeared. */
5596 ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
5597 ret = notifier_to_errno(ret);
5598 if (ret) {
5599 rollback_registered(dev);
5600 dev->reg_state = NETREG_UNREGISTERED;
5601 }
5602 /*
5603 * Prevent userspace races by waiting until the network
5604 * device is fully setup before sending notifications.
5605 */
5606 if (!dev->rtnl_link_ops ||
5607 dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
5608 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
5609
5610 out:
5611 return ret;
5612
5613 err_uninit:
5614 if (dev->netdev_ops->ndo_uninit)
5615 dev->netdev_ops->ndo_uninit(dev);
5616 goto out;
5617 }
5618 EXPORT_SYMBOL(register_netdevice);
5619
5620 /**
5621 * init_dummy_netdev - init a dummy network device for NAPI
5622 * @dev: device to init
5623 *
5624 * This takes a network device structure and initializes the minimum
5625 * number of fields so it can be used to schedule NAPI polls without
5626 * registering a full blown interface. This is to be used by drivers
5627 * that need to tie several hardware interfaces to a single NAPI
5628 * poll scheduler due to HW limitations.
5629 */
5630 int init_dummy_netdev(struct net_device *dev)
5631 {
5632 /* Clear everything. Note we don't initialize spinlocks
5633 * as they aren't supposed to be taken by any of the
5634 * NAPI code and this dummy netdev is supposed to be
5635 * only ever used for NAPI polls
5636 */
5637 memset(dev, 0, sizeof(struct net_device));
5638
5639 /* make sure we BUG if trying to hit standard
5640 * register/unregister code path
5641 */
5642 dev->reg_state = NETREG_DUMMY;
5643
5644 /* NAPI wants this */
5645 INIT_LIST_HEAD(&dev->napi_list);
5646
5647 /* a dummy interface is started by default */
5648 set_bit(__LINK_STATE_PRESENT, &dev->state);
5649 set_bit(__LINK_STATE_START, &dev->state);
5650
5651 /* Note : We don't allocate pcpu_refcnt for dummy devices,
5652 * because users of this 'device' don't need to change
5653 * its refcount.
5654 */
5655
5656 return 0;
5657 }
5658 EXPORT_SYMBOL_GPL(init_dummy_netdev);
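
/*
 * Illustrative sketch (not part of this file): a driver with several
 * hardware channels funnels them through one NAPI context attached to a
 * dummy netdev that is never registered. All example_* names are invented.
 */
#if 0
struct example_adapter {
	struct net_device napi_dev;	/* dummy device, only for NAPI */
	struct napi_struct napi;
};

static int example_poll(struct napi_struct *napi, int budget)
{
	int done = 0;

	/* ... reap up to @budget completions from the hardware ... */
	if (done < budget)
		napi_complete(napi);
	return done;
}

static void example_init_napi(struct example_adapter *adapter)
{
	init_dummy_netdev(&adapter->napi_dev);
	netif_napi_add(&adapter->napi_dev, &adapter->napi, example_poll, 64);
	napi_enable(&adapter->napi);
}
#endif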
5659
5660
5661 /**
5662 * register_netdev - register a network device
5663 * @dev: device to register
5664 *
5665 * Take a completed network device structure and add it to the kernel
5666 * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
5667 * chain. 0 is returned on success. A negative errno code is returned
5668 * on a failure to set up the device, or if the name is a duplicate.
5669 *
5670 * This is a wrapper around register_netdevice that takes the rtnl semaphore
5671 * and expands the device name if you passed a format string to
5672 * alloc_netdev.
5673 */
5674 int register_netdev(struct net_device *dev)
5675 {
5676 int err;
5677
5678 rtnl_lock();
5679 err = register_netdevice(dev);
5680 rtnl_unlock();
5681 return err;
5682 }
5683 EXPORT_SYMBOL(register_netdev);
5684
5685 int netdev_refcnt_read(const struct net_device *dev)
5686 {
5687 int i, refcnt = 0;
5688
5689 for_each_possible_cpu(i)
5690 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
5691 return refcnt;
5692 }
5693 EXPORT_SYMBOL(netdev_refcnt_read);
5694
5695 /*
5696 * netdev_wait_allrefs - wait until all references are gone.
5697 *
5698 * This is called when unregistering network devices.
5699 *
5700 * Any protocol or device that holds a reference should register
5701 * for netdevice notification, and cleanup and put back the
5702 * reference if they receive an UNREGISTER event.
5703 * We can get stuck here if buggy protocols don't correctly
5704 * call dev_put.
5705 */
5706 static void netdev_wait_allrefs(struct net_device *dev)
5707 {
5708 unsigned long rebroadcast_time, warning_time;
5709 int refcnt;
5710
5711 linkwatch_forget_dev(dev);
5712
5713 rebroadcast_time = warning_time = jiffies;
5714 refcnt = netdev_refcnt_read(dev);
5715
5716 while (refcnt != 0) {
5717 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
5718 rtnl_lock();
5719
5720 /* Rebroadcast unregister notification */
5721 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
5722 /* don't resend NETDEV_UNREGISTER_BATCH, _BATCH users
5723 * should have already handled it the first time */
5724
5725 if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
5726 &dev->state)) {
5727 /* We must not have linkwatch events
5728 * pending on unregister. If this
5729 * happens, we simply run the queue
5730 * unscheduled, resulting in a noop
5731 * for this device.
5732 */
5733 linkwatch_run_queue();
5734 }
5735
5736 __rtnl_unlock();
5737
5738 rebroadcast_time = jiffies;
5739 }
5740
5741 msleep(250);
5742
5743 refcnt = netdev_refcnt_read(dev);
5744
5745 if (time_after(jiffies, warning_time + 10 * HZ)) {
5746 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
5747 dev->name, refcnt);
5748 warning_time = jiffies;
5749 }
5750 }
5751 }
5752
5753 /* The sequence is:
5754 *
5755 * rtnl_lock();
5756 * ...
5757 * register_netdevice(x1);
5758 * register_netdevice(x2);
5759 * ...
5760 * unregister_netdevice(y1);
5761 * unregister_netdevice(y2);
5762 * ...
5763 * rtnl_unlock();
5764 * free_netdev(y1);
5765 * free_netdev(y2);
5766 *
5767 * We are invoked by rtnl_unlock().
5768 * This allows us to deal with problems:
5769 * 1) We can delete sysfs objects which invoke hotplug
5770 * without deadlocking with linkwatch via keventd.
5771 * 2) Since we run with the RTNL semaphore not held, we can sleep
5772 * safely in order to wait for the netdev refcnt to drop to zero.
5773 *
5774 * We must not return until all unregister events added during
5775 * the interval the lock was held have been completed.
5776 */
5777 void netdev_run_todo(void)
5778 {
5779 struct list_head list;
5780
5781 /* Snapshot list, allow later requests */
5782 list_replace_init(&net_todo_list, &list);
5783
5784 __rtnl_unlock();
5785
5786 /* Wait for rcu callbacks to finish before attempting to drain
5787 * the device list. This usually avoids a 250ms wait.
5788 */
5789 if (!list_empty(&list))
5790 rcu_barrier();
5791
5792 while (!list_empty(&list)) {
5793 struct net_device *dev
5794 = list_first_entry(&list, struct net_device, todo_list);
5795 list_del(&dev->todo_list);
5796
5797 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
5798 pr_err("network todo '%s' but state %d\n",
5799 dev->name, dev->reg_state);
5800 dump_stack();
5801 continue;
5802 }
5803
5804 dev->reg_state = NETREG_UNREGISTERED;
5805
5806 on_each_cpu(flush_backlog, dev, 1);
5807
5808 netdev_wait_allrefs(dev);
5809
5810 /* paranoia */
5811 BUG_ON(netdev_refcnt_read(dev));
5812 WARN_ON(rcu_access_pointer(dev->ip_ptr));
5813 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
5814 WARN_ON(dev->dn_ptr);
5815
5816 if (dev->destructor)
5817 dev->destructor(dev);
5818
5819 /* Free network device */
5820 kobject_put(&dev->dev.kobj);
5821 }
5822 }
5823
5824 /* Convert net_device_stats to rtnl_link_stats64. They have the same
5825 * fields in the same order, with only the type differing.
5826 */
5827 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
5828 const struct net_device_stats *netdev_stats)
5829 {
5830 #if BITS_PER_LONG == 64
5831 BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
5832 memcpy(stats64, netdev_stats, sizeof(*stats64));
5833 #else
5834 size_t i, n = sizeof(*stats64) / sizeof(u64);
5835 const unsigned long *src = (const unsigned long *)netdev_stats;
5836 u64 *dst = (u64 *)stats64;
5837
5838 BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
5839 sizeof(*stats64) / sizeof(u64));
5840 for (i = 0; i < n; i++)
5841 dst[i] = src[i];
5842 #endif
5843 }
5844 EXPORT_SYMBOL(netdev_stats_to_stats64);
5845
5846 /**
5847 * dev_get_stats - get network device statistics
5848 * @dev: device to get statistics from
5849 * @storage: place to store stats
5850 *
5851 * Get network statistics from device. Return @storage.
5852 * The device driver may provide its own method by setting
5853 * dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
5854 * otherwise the internal statistics structure is used.
5855 */
5856 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
5857 struct rtnl_link_stats64 *storage)
5858 {
5859 const struct net_device_ops *ops = dev->netdev_ops;
5860
5861 if (ops->ndo_get_stats64) {
5862 memset(storage, 0, sizeof(*storage));
5863 ops->ndo_get_stats64(dev, storage);
5864 } else if (ops->ndo_get_stats) {
5865 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
5866 } else {
5867 netdev_stats_to_stats64(storage, &dev->stats);
5868 }
5869 storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
5870 return storage;
5871 }
5872 EXPORT_SYMBOL(dev_get_stats);
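
/*
 * Illustrative sketch (not part of this file): snapshotting a device's
 * counters the same way dev_seq_printf_stats() above does, via an on-stack
 * rtnl_link_stats64 passed as @storage. The helper name is invented.
 */
#if 0
static void example_log_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	pr_info("%s: rx %llu packets (%llu dropped), tx %llu packets\n",
		dev->name, stats->rx_packets, stats->rx_dropped,
		stats->tx_packets);
}
#endif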
5873
5874 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
5875 {
5876 struct netdev_queue *queue = dev_ingress_queue(dev);
5877
5878 #ifdef CONFIG_NET_CLS_ACT
5879 if (queue)
5880 return queue;
5881 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
5882 if (!queue)
5883 return NULL;
5884 netdev_init_one_queue(dev, queue, NULL);
5885 queue->qdisc = &noop_qdisc;
5886 queue->qdisc_sleeping = &noop_qdisc;
5887 rcu_assign_pointer(dev->ingress_queue, queue);
5888 #endif
5889 return queue;
5890 }
5891
5892 /**
5893 * alloc_netdev_mqs - allocate network device
5894 * @sizeof_priv: size of private data to allocate space for
5895 * @name: device name format string
5896 * @setup: callback to initialize device
5897 * @txqs: the number of TX subqueues to allocate
5898 * @rxqs: the number of RX subqueues to allocate
5899 *
5900 * Allocates a struct net_device with private data area for driver use
5901 * and performs basic initialization. Also allocates subqueue structs
5902 * for each queue on the device.
5903 */
5904 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
5905 void (*setup)(struct net_device *),
5906 unsigned int txqs, unsigned int rxqs)
5907 {
5908 struct net_device *dev;
5909 size_t alloc_size;
5910 struct net_device *p;
5911
5912 BUG_ON(strlen(name) >= sizeof(dev->name));
5913
5914 if (txqs < 1) {
5915 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
5916 return NULL;
5917 }
5918
5919 #ifdef CONFIG_RPS
5920 if (rxqs < 1) {
5921 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
5922 return NULL;
5923 }
5924 #endif
5925
5926 alloc_size = sizeof(struct net_device);
5927 if (sizeof_priv) {
5928 /* ensure 32-byte alignment of private area */
5929 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
5930 alloc_size += sizeof_priv;
5931 }
5932 /* ensure 32-byte alignment of whole construct */
5933 alloc_size += NETDEV_ALIGN - 1;
5934
5935 p = kzalloc(alloc_size, GFP_KERNEL);
5936 if (!p) {
5937 pr_err("alloc_netdev: Unable to allocate device\n");
5938 return NULL;
5939 }
5940
5941 dev = PTR_ALIGN(p, NETDEV_ALIGN);
5942 dev->padded = (char *)dev - (char *)p;
5943
5944 dev->pcpu_refcnt = alloc_percpu(int);
5945 if (!dev->pcpu_refcnt)
5946 goto free_p;
5947
5948 if (dev_addr_init(dev))
5949 goto free_pcpu;
5950
5951 dev_mc_init(dev);
5952 dev_uc_init(dev);
5953
5954 dev_net_set(dev, &init_net);
5955
5956 dev->gso_max_size = GSO_MAX_SIZE;
5957
5958 INIT_LIST_HEAD(&dev->napi_list);
5959 INIT_LIST_HEAD(&dev->unreg_list);
5960 INIT_LIST_HEAD(&dev->link_watch_list);
5961 dev->priv_flags = IFF_XMIT_DST_RELEASE;
5962 setup(dev);
5963
5964 dev->num_tx_queues = txqs;
5965 dev->real_num_tx_queues = txqs;
5966 if (netif_alloc_netdev_queues(dev))
5967 goto free_all;
5968
5969 #ifdef CONFIG_RPS
5970 dev->num_rx_queues = rxqs;
5971 dev->real_num_rx_queues = rxqs;
5972 if (netif_alloc_rx_queues(dev))
5973 goto free_all;
5974 #endif
5975
5976 strcpy(dev->name, name);
5977 dev->group = INIT_NETDEV_GROUP;
5978 return dev;
5979
5980 free_all:
5981 free_netdev(dev);
5982 return NULL;
5983
5984 free_pcpu:
5985 free_percpu(dev->pcpu_refcnt);
5986 kfree(dev->_tx);
5987 #ifdef CONFIG_RPS
5988 kfree(dev->_rx);
5989 #endif
5990
5991 free_p:
5992 kfree(p);
5993 return NULL;
5994 }
5995 EXPORT_SYMBOL(alloc_netdev_mqs);
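
/*
 * Illustrative sketch (not part of this file): a multiqueue Ethernet driver
 * allocating its netdev with ether_setup() defaults, registering it and
 * unwinding on failure. struct example_priv, the "exmpl%d" name format and
 * the queue counts are all hypothetical.
 */
#if 0
static int example_probe(void)
{
	struct net_device *dev;
	int err;

	dev = alloc_netdev_mqs(sizeof(struct example_priv), "exmpl%d",
			       ether_setup, 4, 4);	/* 4 TX / 4 RX queues */
	if (!dev)
		return -ENOMEM;

	/* set dev->netdev_ops, dev->dev_addr, features, ... here */

	err = register_netdev(dev);	/* takes the RTNL and expands "%d" */
	if (err) {
		free_netdev(dev);	/* still NETREG_UNINITIALIZED: plain free */
		return err;
	}
	return 0;
}
#endif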
5996
5997 /**
5998 * free_netdev - free network device
5999 * @dev: device
6000 *
6001 * This function does the last stage of destroying an allocated device
6002 * interface. The reference to the device object is released.
6003 * If this is the last reference then it will be freed.
6004 */
6005 void free_netdev(struct net_device *dev)
6006 {
6007 struct napi_struct *p, *n;
6008
6009 release_net(dev_net(dev));
6010
6011 kfree(dev->_tx);
6012 #ifdef CONFIG_RPS
6013 kfree(dev->_rx);
6014 #endif
6015
6016 kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6017
6018 /* Flush device addresses */
6019 dev_addr_flush(dev);
6020
6021 list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6022 netif_napi_del(p);
6023
6024 free_percpu(dev->pcpu_refcnt);
6025 dev->pcpu_refcnt = NULL;
6026
6027 /* Compatibility with error handling in drivers */
6028 if (dev->reg_state == NETREG_UNINITIALIZED) {
6029 kfree((char *)dev - dev->padded);
6030 return;
6031 }
6032
6033 BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6034 dev->reg_state = NETREG_RELEASED;
6035
6036 /* will free via device release */
6037 put_device(&dev->dev);
6038 }
6039 EXPORT_SYMBOL(free_netdev);
6040
6041 /**
6042 * synchronize_net - Synchronize with packet receive processing
6043 *
6044 * Wait for packets currently being received to be done.
6045 * Does not block later packets from starting.
6046 */
6047 void synchronize_net(void)
6048 {
6049 might_sleep();
6050 if (rtnl_is_locked())
6051 synchronize_rcu_expedited();
6052 else
6053 synchronize_rcu();
6054 }
6055 EXPORT_SYMBOL(synchronize_net);
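
/*
 * Usage sketch (illustrative, not part of dev.c): the typical pattern around
 * synchronize_net() is "unpublish, wait, free".  "struct my_handler" and the
 * slot pointer are hypothetical; the caller is assumed to hold whatever lock
 * serializes updates of the slot.
 */
static void my_unpublish_handler(struct my_handler __rcu **slot)
{
	struct my_handler *old = rcu_dereference_protected(*slot, 1);

	RCU_INIT_POINTER(*slot, NULL);
	/* wait for receive paths still dereferencing *slot under rcu_read_lock() */
	synchronize_net();
	kfree(old);
}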
6056
6057 /**
6058 * unregister_netdevice_queue - remove device from the kernel
6059 * @dev: device
6060 * @head: list
6061 *
6062 * This function shuts down a device interface and removes it
6063 * from the kernel tables.
6064 * If head is not NULL, the device is queued to be unregistered later.
6065 *
6066 * Callers must hold the rtnl semaphore. You may want
6067 * unregister_netdev() instead of this.
6068 */
6069
6070 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6071 {
6072 ASSERT_RTNL();
6073
6074 if (head) {
6075 list_move_tail(&dev->unreg_list, head);
6076 } else {
6077 rollback_registered(dev);
6078 /* Finish processing unregister after unlock */
6079 net_set_todo(dev);
6080 }
6081 }
6082 EXPORT_SYMBOL(unregister_netdevice_queue);
6083
6084 /**
6085 * unregister_netdevice_many - unregister many devices
6086 * @head: list of devices
6087 */
6088 void unregister_netdevice_many(struct list_head *head)
6089 {
6090 struct net_device *dev;
6091
6092 if (!list_empty(head)) {
6093 rollback_registered_many(head);
6094 list_for_each_entry(dev, head, unreg_list)
6095 net_set_todo(dev);
6096 }
6097 }
6098 EXPORT_SYMBOL(unregister_netdevice_many);
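
/*
 * Usage sketch (illustrative, not part of dev.c): queueing several devices
 * on one list lets rollback_registered_many() share the expensive
 * synchronization points across the whole batch.  "my_devs" is hypothetical.
 */
static void my_destroy_batch(struct net_device **my_devs, int count)
{
	LIST_HEAD(kill_list);
	int i;

	rtnl_lock();
	for (i = 0; i < count; i++)
		unregister_netdevice_queue(my_devs[i], &kill_list);
	unregister_netdevice_many(&kill_list);
	rtnl_unlock();
}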
6099
6100 /**
6101 * unregister_netdev - remove device from the kernel
6102 * @dev: device
6103 *
6104 * This function shuts down a device interface and removes it
6105 * from the kernel tables.
6106 *
6107 * This is just a wrapper for unregister_netdevice that takes
6108 * the rtnl semaphore. In general you want to use this and not
6109 * unregister_netdevice.
6110 */
6111 void unregister_netdev(struct net_device *dev)
6112 {
6113 rtnl_lock();
6114 unregister_netdevice(dev);
6115 rtnl_unlock();
6116 }
6117 EXPORT_SYMBOL(unregister_netdev);
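
/*
 * Usage sketch (illustrative, not part of dev.c): the common teardown
 * sequence in a driver's remove/module-exit path for a device allocated
 * with alloc_netdev_mqs() and registered with register_netdev().
 */
static void my_remove_netdev(struct net_device *dev)
{
	unregister_netdev(dev);	/* takes and releases the rtnl lock itself */
	free_netdev(dev);	/* releases our reference; freed once the last ref is gone */
}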
6118
6119 /**
6120 * dev_change_net_namespace - move device to a different network namespace
6121 * @dev: device
6122 * @net: network namespace
6123 * @pat: If not NULL, name pattern to try if the current device name
6124 * is already taken in the destination network namespace.
6125 *
6126 * This function shuts down a device interface and moves it
6127 * to a new network namespace. On success 0 is returned, on
6128 * a failure a negative errno code is returned.
6129 *
6130 * Callers must hold the rtnl semaphore.
6131 */
6132
6133 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
6134 {
6135 int err;
6136
6137 ASSERT_RTNL();
6138
6139 /* Don't allow namespace local devices to be moved. */
6140 err = -EINVAL;
6141 if (dev->features & NETIF_F_NETNS_LOCAL)
6142 goto out;
6143
6144 /* Ensure the device has been registered */
6145 err = -EINVAL;
6146 if (dev->reg_state != NETREG_REGISTERED)
6147 goto out;
6148
6149 /* Get out if there is nothing to do */
6150 err = 0;
6151 if (net_eq(dev_net(dev), net))
6152 goto out;
6153
6154 /* Pick the destination device name, and ensure
6155 * we can use it in the destination network namespace.
6156 */
6157 err = -EEXIST;
6158 if (__dev_get_by_name(net, dev->name)) {
6159 /* We get here if we can't use the current device name */
6160 if (!pat)
6161 goto out;
6162 if (dev_get_valid_name(dev, pat) < 0)
6163 goto out;
6164 }
6165
6166 /*
6167 * And now do a mini version of register_netdevice and unregister_netdevice.
6168 */
6169
6170 /* If device is running close it first. */
6171 dev_close(dev);
6172
6173 /* And unlink it from device chain */
6174 err = -ENODEV;
6175 unlist_netdevice(dev);
6176
6177 synchronize_net();
6178
6179 /* Shutdown queueing discipline. */
6180 dev_shutdown(dev);
6181
6182 /* Notify protocols that we are about to destroy
6183 this device. They should clean up all of their state.
6184
6185 Note that dev->reg_state stays at NETREG_REGISTERED.
6186 This is intentional: this way 8021q and macvlan know
6187 the device is just moving and can keep their slaves up.
6188 */
6189 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6190 call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
6191 rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
6192
6193 /*
6194 * Flush the unicast and multicast chains
6195 */
6196 dev_uc_flush(dev);
6197 dev_mc_flush(dev);
6198
6199 /* Actually switch the network namespace */
6200 dev_net_set(dev, net);
6201
6202 /* If there is an ifindex conflict assign a new one */
6203 if (__dev_get_by_index(net, dev->ifindex)) {
6204 int iflink = (dev->iflink == dev->ifindex);
6205 dev->ifindex = dev_new_index(net);
6206 if (iflink)
6207 dev->iflink = dev->ifindex;
6208 }
6209
6210 /* Fixup kobjects */
6211 err = device_rename(&dev->dev, dev->name);
6212 WARN_ON(err);
6213
6214 /* Add the device back in the hashes */
6215 list_netdevice(dev);
6216
6217 /* Notify protocols that a new device appeared. */
6218 call_netdevice_notifiers(NETDEV_REGISTER, dev);
6219
6220 /*
6221 * Prevent userspace races by waiting until the network
6222 * device is fully setup before sending notifications.
6223 */
6224 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
6225
6226 synchronize_net();
6227 err = 0;
6228 out:
6229 return err;
6230 }
6231 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
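
/*
 * Usage sketch (illustrative, not part of dev.c): moving a device into
 * another namespace, falling back to a generated name on a clash, much
 * like default_device_exit() later in this file does with "dev%d".
 * "target" is assumed to be a struct net reference the caller already holds.
 */
static int my_move_to_netns(struct net_device *dev, struct net *target)
{
	int err;

	rtnl_lock();
	/* keep dev->name if it is free in "target", else pick an eth%d name */
	err = dev_change_net_namespace(dev, target, "eth%d");
	rtnl_unlock();

	return err;
}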
6232
6233 static int dev_cpu_callback(struct notifier_block *nfb,
6234 unsigned long action,
6235 void *ocpu)
6236 {
6237 struct sk_buff **list_skb;
6238 struct sk_buff *skb;
6239 unsigned int cpu, oldcpu = (unsigned long)ocpu;
6240 struct softnet_data *sd, *oldsd;
6241
6242 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
6243 return NOTIFY_OK;
6244
6245 local_irq_disable();
6246 cpu = smp_processor_id();
6247 sd = &per_cpu(softnet_data, cpu);
6248 oldsd = &per_cpu(softnet_data, oldcpu);
6249
6250 /* Find end of our completion_queue. */
6251 list_skb = &sd->completion_queue;
6252 while (*list_skb)
6253 list_skb = &(*list_skb)->next;
6254 /* Append completion queue from offline CPU. */
6255 *list_skb = oldsd->completion_queue;
6256 oldsd->completion_queue = NULL;
6257
6258 /* Append output queue from offline CPU. */
6259 if (oldsd->output_queue) {
6260 *sd->output_queue_tailp = oldsd->output_queue;
6261 sd->output_queue_tailp = oldsd->output_queue_tailp;
6262 oldsd->output_queue = NULL;
6263 oldsd->output_queue_tailp = &oldsd->output_queue;
6264 }
6265 /* Append NAPI poll list from offline CPU. */
6266 if (!list_empty(&oldsd->poll_list)) {
6267 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6268 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6269 }
6270
6271 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6272 local_irq_enable();
6273
6274 /* Process offline CPU's input_pkt_queue */
6275 while ((skb = __skb_dequeue(&oldsd->process_queue))) {
6276 netif_rx(skb);
6277 input_queue_head_incr(oldsd);
6278 }
6279 while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
6280 netif_rx(skb);
6281 input_queue_head_incr(oldsd);
6282 }
6283
6284 return NOTIFY_OK;
6285 }
6286
6287
6288 /**
6289 * netdev_increment_features - increment feature set by one
6290 * @all: current feature set
6291 * @one: new feature set
6292 * @mask: mask feature set
6293 *
6294 * Computes a new feature set after adding a device with feature set
6295 * @one to the master device with current feature set @all. Will not
6296 * enable anything that is off in @mask. Returns the new feature set.
6297 */
6298 netdev_features_t netdev_increment_features(netdev_features_t all,
6299 netdev_features_t one, netdev_features_t mask)
6300 {
6301 if (mask & NETIF_F_GEN_CSUM)
6302 mask |= NETIF_F_ALL_CSUM;
6303 mask |= NETIF_F_VLAN_CHALLENGED;
6304
6305 all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
6306 all &= one | ~NETIF_F_ALL_FOR_ALL;
6307
6308 /* If one device supports hw checksumming, set for all. */
6309 if (all & NETIF_F_GEN_CSUM)
6310 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
6311
6312 return all;
6313 }
6314 EXPORT_SYMBOL(netdev_increment_features);
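
/*
 * Usage sketch (illustrative, not part of dev.c): how a bonding/bridge-style
 * master might fold its slaves' feature sets together with this helper.
 * "struct my_master"/"struct my_slave" and their list layout are hypothetical.
 */
static netdev_features_t my_compute_features(struct my_master *master)
{
	netdev_features_t mask = master->dev->features;
	netdev_features_t features = mask & ~NETIF_F_ONE_FOR_ALL;
	struct my_slave *slave;

	features |= NETIF_F_ALL_FOR_ALL;
	list_for_each_entry(slave, &master->slaves, list)
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	return features;
}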
6315
6316 static struct hlist_head *netdev_create_hash(void)
6317 {
6318 int i;
6319 struct hlist_head *hash;
6320
6321 hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
6322 if (hash != NULL)
6323 for (i = 0; i < NETDEV_HASHENTRIES; i++)
6324 INIT_HLIST_HEAD(&hash[i]);
6325
6326 return hash;
6327 }
6328
6329 /* Initialize per network namespace state */
6330 static int __net_init netdev_init(struct net *net)
6331 {
6332 INIT_LIST_HEAD(&net->dev_base_head);
6333
6334 net->dev_name_head = netdev_create_hash();
6335 if (net->dev_name_head == NULL)
6336 goto err_name;
6337
6338 net->dev_index_head = netdev_create_hash();
6339 if (net->dev_index_head == NULL)
6340 goto err_idx;
6341
6342 return 0;
6343
6344 err_idx:
6345 kfree(net->dev_name_head);
6346 err_name:
6347 return -ENOMEM;
6348 }
6349
6350 /**
6351 * netdev_drivername - network driver for the device
6352 * @dev: network device
6353 *
6354 * Determine network driver for device.
6355 */
6356 const char *netdev_drivername(const struct net_device *dev)
6357 {
6358 const struct device_driver *driver;
6359 const struct device *parent;
6360 const char *empty = "";
6361
6362 parent = dev->dev.parent;
6363 if (!parent)
6364 return empty;
6365
6366 driver = parent->driver;
6367 if (driver && driver->name)
6368 return driver->name;
6369 return empty;
6370 }
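
/*
 * Usage sketch (illustrative, not part of dev.c): naming the responsible
 * driver in a diagnostic, the way the tx watchdog reports stuck queues.
 * "my_report_timeout" is hypothetical; netdev_warn() is defined below.
 */
static void my_report_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit queue timed out (driver %s)\n",
		    netdev_drivername(dev));
}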
6371
6372 int __netdev_printk(const char *level, const struct net_device *dev,
6373 struct va_format *vaf)
6374 {
6375 int r;
6376
6377 if (dev && dev->dev.parent)
6378 r = dev_printk(level, dev->dev.parent, "%s: %pV",
6379 netdev_name(dev), vaf);
6380 else if (dev)
6381 r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
6382 else
6383 r = printk("%s(NULL net_device): %pV", level, vaf);
6384
6385 return r;
6386 }
6387 EXPORT_SYMBOL(__netdev_printk);
6388
6389 int netdev_printk(const char *level, const struct net_device *dev,
6390 const char *format, ...)
6391 {
6392 struct va_format vaf;
6393 va_list args;
6394 int r;
6395
6396 va_start(args, format);
6397
6398 vaf.fmt = format;
6399 vaf.va = &args;
6400
6401 r = __netdev_printk(level, dev, &vaf);
6402 va_end(args);
6403
6404 return r;
6405 }
6406 EXPORT_SYMBOL(netdev_printk);
6407
6408 #define define_netdev_printk_level(func, level) \
6409 int func(const struct net_device *dev, const char *fmt, ...) \
6410 { \
6411 int r; \
6412 struct va_format vaf; \
6413 va_list args; \
6414 \
6415 va_start(args, fmt); \
6416 \
6417 vaf.fmt = fmt; \
6418 vaf.va = &args; \
6419 \
6420 r = __netdev_printk(level, dev, &vaf); \
6421 va_end(args); \
6422 \
6423 return r; \
6424 } \
6425 EXPORT_SYMBOL(func);
6426
6427 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
6428 define_netdev_printk_level(netdev_alert, KERN_ALERT);
6429 define_netdev_printk_level(netdev_crit, KERN_CRIT);
6430 define_netdev_printk_level(netdev_err, KERN_ERR);
6431 define_netdev_printk_level(netdev_warn, KERN_WARNING);
6432 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
6433 define_netdev_printk_level(netdev_info, KERN_INFO);
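
/*
 * Usage sketch (illustrative, not part of dev.c): drivers call these helpers
 * instead of raw printk() so every message is prefixed with the driver, bus
 * and interface name.  "my_link_change" is hypothetical.
 */
static void my_link_change(struct net_device *dev, bool up)
{
	if (up)
		netdev_info(dev, "link is up\n");
	else
		netdev_warn(dev, "link is down\n");
}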
6434
6435 static void __net_exit netdev_exit(struct net *net)
6436 {
6437 kfree(net->dev_name_head);
6438 kfree(net->dev_index_head);
6439 }
6440
6441 static struct pernet_operations __net_initdata netdev_net_ops = {
6442 .init = netdev_init,
6443 .exit = netdev_exit,
6444 };
6445
6446 static void __net_exit default_device_exit(struct net *net)
6447 {
6448 struct net_device *dev, *aux;
6449 /*
6450 * Push all migratable network devices back to the
6451 * initial network namespace
6452 */
6453 rtnl_lock();
6454 for_each_netdev_safe(net, dev, aux) {
6455 int err;
6456 char fb_name[IFNAMSIZ];
6457
6458 /* Ignore unmovable devices (e.g. loopback) */
6459 if (dev->features & NETIF_F_NETNS_LOCAL)
6460 continue;
6461
6462 /* Leave virtual devices for the generic cleanup */
6463 if (dev->rtnl_link_ops)
6464 continue;
6465
6466 /* Push remaining network devices to init_net */
6467 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
6468 err = dev_change_net_namespace(dev, &init_net, fb_name);
6469 if (err) {
6470 pr_emerg("%s: failed to move %s to init_net: %d\n",
6471 __func__, dev->name, err);
6472 BUG();
6473 }
6474 }
6475 rtnl_unlock();
6476 }
6477
6478 static void __net_exit default_device_exit_batch(struct list_head *net_list)
6479 {
6480 /* At exit all network devices must be removed from a network
6481 * namespace. Do this in the reverse order of registration.
6482 * Do this across as many network namespaces as possible to
6483 * improve batching efficiency.
6484 */
6485 struct net_device *dev;
6486 struct net *net;
6487 LIST_HEAD(dev_kill_list);
6488
6489 rtnl_lock();
6490 list_for_each_entry(net, net_list, exit_list) {
6491 for_each_netdev_reverse(net, dev) {
6492 if (dev->rtnl_link_ops)
6493 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
6494 else
6495 unregister_netdevice_queue(dev, &dev_kill_list);
6496 }
6497 }
6498 unregister_netdevice_many(&dev_kill_list);
6499 list_del(&dev_kill_list);
6500 rtnl_unlock();
6501 }
6502
6503 static struct pernet_operations __net_initdata default_device_ops = {
6504 .exit = default_device_exit,
6505 .exit_batch = default_device_exit_batch,
6506 };
6507
6508 /*
6509 * Initialize the DEV module. At boot time this walks the device list and
6510 * unhooks any devices that fail to initialise (normally hardware not
6511 * present) and leaves us with a valid list of present and active devices.
6512 *
6513 */
6514
6515 /*
6516 * This is called single threaded during boot, so no need
6517 * to take the rtnl semaphore.
6518 */
6519 static int __init net_dev_init(void)
6520 {
6521 int i, rc = -ENOMEM;
6522
6523 BUG_ON(!dev_boot_phase);
6524
6525 if (dev_proc_init())
6526 goto out;
6527
6528 if (netdev_kobject_init())
6529 goto out;
6530
6531 INIT_LIST_HEAD(&ptype_all);
6532 for (i = 0; i < PTYPE_HASH_SIZE; i++)
6533 INIT_LIST_HEAD(&ptype_base[i]);
6534
6535 if (register_pernet_subsys(&netdev_net_ops))
6536 goto out;
6537
6538 /*
6539 * Initialise the packet receive queues.
6540 */
6541
6542 for_each_possible_cpu(i) {
6543 struct softnet_data *sd = &per_cpu(softnet_data, i);
6544
6545 memset(sd, 0, sizeof(*sd));
6546 skb_queue_head_init(&sd->input_pkt_queue);
6547 skb_queue_head_init(&sd->process_queue);
6548 sd->completion_queue = NULL;
6549 INIT_LIST_HEAD(&sd->poll_list);
6550 sd->output_queue = NULL;
6551 sd->output_queue_tailp = &sd->output_queue;
6552 #ifdef CONFIG_RPS
6553 sd->csd.func = rps_trigger_softirq;
6554 sd->csd.info = sd;
6555 sd->csd.flags = 0;
6556 sd->cpu = i;
6557 #endif
6558
6559 sd->backlog.poll = process_backlog;
6560 sd->backlog.weight = weight_p;
6561 sd->backlog.gro_list = NULL;
6562 sd->backlog.gro_count = 0;
6563 }
6564
6565 dev_boot_phase = 0;
6566
6567 /* The loopback device is special: if any other network device
6568 * is present in a network namespace, the loopback device must
6569 * be present too. Since we now dynamically allocate and free
6570 * the loopback device, ensure this invariant is maintained by
6571 * keeping the loopback device as the first device on the
6572 * list of network devices: it is the first device that
6573 * appears in a namespace and the last network device
6574 * that disappears.
6575 */
6576 if (register_pernet_device(&loopback_net_ops))
6577 goto out;
6578
6579 if (register_pernet_device(&default_device_ops))
6580 goto out;
6581
6582 open_softirq(NET_TX_SOFTIRQ, net_tx_action);
6583 open_softirq(NET_RX_SOFTIRQ, net_rx_action);
6584
6585 hotcpu_notifier(dev_cpu_callback, 0);
6586 dst_init();
6587 dev_mcast_init();
6588 rc = 0;
6589 out:
6590 return rc;
6591 }
6592
6593 subsys_initcall(net_dev_init);
6594
6595 static int __init initialize_hashrnd(void)
6596 {
6597 get_random_bytes(&hashrnd, sizeof(hashrnd));
6598 return 0;
6599 }
6600
6601 late_initcall_sync(initialize_hashrnd);
6602