drivers/net/bonding/bond_main.c
1 /*
2 * originally based on the dummy device.
3 *
4 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
5 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
6 *
7 * bonding.c: an Ethernet Bonding driver
8 *
9 * This is useful for talking to Cisco EtherChannel compatible equipment:
10 * Cisco 5500
11 * Sun Trunking (Solaris)
12 * Alteon AceDirector Trunks
13 * Linux Bonding
14 * and probably many L2 switches ...
15 *
16 * How it works:
17 * ifconfig bond0 ipaddress netmask up
18 * will set up a network device, with an ip address. No mac address
19 * will be assigned at this time. The hw mac address will come from
20 * the first slave bonded to the channel. All slaves will then use
21 * this hw mac address.
22 *
23 * ifconfig bond0 down
24 * will release all slaves, marking them as down.
25 *
26 * ifenslave bond0 eth0
27 * will attach eth0 to bond0 as a slave. eth0's hw mac address will either
28 * a: be used as the bond's initial mac address, or
29 * b: if bond0 already has a hw mac address, be overwritten with
30 * bond0's address.
31 *
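 * A minimal, illustrative session (the addresses below are only an example,
 * not taken from the original text):
 *
 * ifconfig bond0 192.168.0.10 netmask 255.255.255.0 up
 * ifenslave bond0 eth0
 * ifenslave bond0 eth1
 *
 * bond0 inherits eth0's hw mac address, and eth1 is given that same
 * address when it is enslaved.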
32 */
33
34 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
36 #include <linux/kernel.h>
37 #include <linux/module.h>
38 #include <linux/types.h>
39 #include <linux/fcntl.h>
40 #include <linux/interrupt.h>
41 #include <linux/ptrace.h>
42 #include <linux/ioport.h>
43 #include <linux/in.h>
44 #include <net/ip.h>
45 #include <linux/ip.h>
46 #include <linux/tcp.h>
47 #include <linux/udp.h>
48 #include <linux/slab.h>
49 #include <linux/string.h>
50 #include <linux/init.h>
51 #include <linux/timer.h>
52 #include <linux/socket.h>
53 #include <linux/ctype.h>
54 #include <linux/inet.h>
55 #include <linux/bitops.h>
56 #include <linux/io.h>
57 #include <asm/dma.h>
58 #include <linux/uaccess.h>
59 #include <linux/errno.h>
60 #include <linux/netdevice.h>
61 #include <linux/inetdevice.h>
62 #include <linux/igmp.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <net/sock.h>
66 #include <linux/rtnetlink.h>
67 #include <linux/smp.h>
68 #include <linux/if_ether.h>
69 #include <net/arp.h>
70 #include <linux/mii.h>
71 #include <linux/ethtool.h>
72 #include <linux/if_vlan.h>
73 #include <linux/if_bonding.h>
74 #include <linux/jiffies.h>
75 #include <linux/preempt.h>
76 #include <net/route.h>
77 #include <net/net_namespace.h>
78 #include <net/netns/generic.h>
79 #include <net/pkt_sched.h>
80 #include <linux/rculist.h>
81 #include "bonding.h"
82 #include "bond_3ad.h"
83 #include "bond_alb.h"
84
85 /*---------------------------- Module parameters ----------------------------*/
86
87 /* monitor all links that often (in milliseconds). <=0 disables monitoring */
88 #define BOND_LINK_MON_INTERV 0
89 #define BOND_LINK_ARP_INTERV 0
90
91 static int max_bonds = BOND_DEFAULT_MAX_BONDS;
92 static int tx_queues = BOND_DEFAULT_TX_QUEUES;
93 static int num_peer_notif = 1;
94 static int miimon = BOND_LINK_MON_INTERV;
95 static int updelay;
96 static int downdelay;
97 static int use_carrier = 1;
98 static char *mode;
99 static char *primary;
100 static char *primary_reselect;
101 static char *lacp_rate;
102 static int min_links;
103 static char *ad_select;
104 static char *xmit_hash_policy;
105 static int arp_interval = BOND_LINK_ARP_INTERV;
106 static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
107 static char *arp_validate;
108 static char *arp_all_targets;
109 static char *fail_over_mac;
110 static int all_slaves_active;
111 static struct bond_params bonding_defaults;
112 static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
113
114 module_param(max_bonds, int, 0);
115 MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
116 module_param(tx_queues, int, 0);
117 MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
118 module_param_named(num_grat_arp, num_peer_notif, int, 0644);
119 MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
120 "failover event (alias of num_unsol_na)");
121 module_param_named(num_unsol_na, num_peer_notif, int, 0644);
122 MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
123 "failover event (alias of num_grat_arp)");
124 module_param(miimon, int, 0);
125 MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
126 module_param(updelay, int, 0);
127 MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
128 module_param(downdelay, int, 0);
129 MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
130 "in milliseconds");
131 module_param(use_carrier, int, 0);
132 MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
133 "0 for off, 1 for on (default)");
134 module_param(mode, charp, 0);
135 MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
136 "1 for active-backup, 2 for balance-xor, "
137 "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
138 "6 for balance-alb");
139 module_param(primary, charp, 0);
140 MODULE_PARM_DESC(primary, "Primary network device to use");
141 module_param(primary_reselect, charp, 0);
142 MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
143 "once it comes up; "
144 "0 for always (default), "
145 "1 for only if speed of primary is "
146 "better, "
147 "2 for only on active slave "
148 "failure");
149 module_param(lacp_rate, charp, 0);
150 MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
151 "0 for slow, 1 for fast");
152 module_param(ad_select, charp, 0);
153 MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
154 "0 for stable (default), 1 for bandwidth, "
155 "2 for count");
156 module_param(min_links, int, 0);
157 MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
158
159 module_param(xmit_hash_policy, charp, 0);
160 MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; "
161 "0 for layer 2 (default), 1 for layer 3+4, "
162 "2 for layer 2+3");
163 module_param(arp_interval, int, 0);
164 MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
165 module_param_array(arp_ip_target, charp, NULL, 0);
166 MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
167 module_param(arp_validate, charp, 0);
168 MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
169 "0 for none (default), 1 for active, "
170 "2 for backup, 3 for all");
171 module_param(arp_all_targets, charp, 0);
172 MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
173 module_param(fail_over_mac, charp, 0);
174 MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
175 "the same MAC; 0 for none (default), "
176 "1 for active, 2 for follow");
177 module_param(all_slaves_active, int, 0);
178 MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
179 "by setting active flag for all slaves; "
180 "0 for never (default), 1 for always.");
181 module_param(resend_igmp, int, 0);
182 MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
183 "link failure");
184
185 /*----------------------------- Global variables ----------------------------*/
186
187 #ifdef CONFIG_NET_POLL_CONTROLLER
188 atomic_t netpoll_block_tx = ATOMIC_INIT(0);
189 #endif
190
191 int bond_net_id __read_mostly;
192
193 static __be32 arp_target[BOND_MAX_ARP_TARGETS];
194 static int arp_ip_count;
195 static int bond_mode = BOND_MODE_ROUNDROBIN;
196 static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
197 static int lacp_fast;
198
199 const struct bond_parm_tbl bond_lacp_tbl[] = {
200 { "slow", AD_LACP_SLOW},
201 { "fast", AD_LACP_FAST},
202 { NULL, -1},
203 };
204
205 const struct bond_parm_tbl bond_mode_tbl[] = {
206 { "balance-rr", BOND_MODE_ROUNDROBIN},
207 { "active-backup", BOND_MODE_ACTIVEBACKUP},
208 { "balance-xor", BOND_MODE_XOR},
209 { "broadcast", BOND_MODE_BROADCAST},
210 { "802.3ad", BOND_MODE_8023AD},
211 { "balance-tlb", BOND_MODE_TLB},
212 { "balance-alb", BOND_MODE_ALB},
213 { NULL, -1},
214 };
215
216 const struct bond_parm_tbl xmit_hashtype_tbl[] = {
217 { "layer2", BOND_XMIT_POLICY_LAYER2},
218 { "layer3+4", BOND_XMIT_POLICY_LAYER34},
219 { "layer2+3", BOND_XMIT_POLICY_LAYER23},
220 { NULL, -1},
221 };
222
223 const struct bond_parm_tbl arp_all_targets_tbl[] = {
224 { "any", BOND_ARP_TARGETS_ANY},
225 { "all", BOND_ARP_TARGETS_ALL},
226 { NULL, -1},
227 };
228
229 const struct bond_parm_tbl arp_validate_tbl[] = {
230 { "none", BOND_ARP_VALIDATE_NONE},
231 { "active", BOND_ARP_VALIDATE_ACTIVE},
232 { "backup", BOND_ARP_VALIDATE_BACKUP},
233 { "all", BOND_ARP_VALIDATE_ALL},
234 { NULL, -1},
235 };
236
237 const struct bond_parm_tbl fail_over_mac_tbl[] = {
238 { "none", BOND_FOM_NONE},
239 { "active", BOND_FOM_ACTIVE},
240 { "follow", BOND_FOM_FOLLOW},
241 { NULL, -1},
242 };
243
244 const struct bond_parm_tbl pri_reselect_tbl[] = {
245 { "always", BOND_PRI_RESELECT_ALWAYS},
246 { "better", BOND_PRI_RESELECT_BETTER},
247 { "failure", BOND_PRI_RESELECT_FAILURE},
248 { NULL, -1},
249 };
250
251 struct bond_parm_tbl ad_select_tbl[] = {
252 { "stable", BOND_AD_STABLE},
253 { "bandwidth", BOND_AD_BANDWIDTH},
254 { "count", BOND_AD_COUNT},
255 { NULL, -1},
256 };
257
258 /*-------------------------- Forward declarations ---------------------------*/
259
260 static int bond_init(struct net_device *bond_dev);
261 static void bond_uninit(struct net_device *bond_dev);
262
263 /*---------------------------- General routines -----------------------------*/
264
265 const char *bond_mode_name(int mode)
266 {
267 static const char *names[] = {
268 [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
269 [BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
270 [BOND_MODE_XOR] = "load balancing (xor)",
271 [BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
272 [BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
273 [BOND_MODE_TLB] = "transmit load balancing",
274 [BOND_MODE_ALB] = "adaptive load balancing",
275 };
276
277 if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
278 return "unknown";
279
280 return names[mode];
281 }
282
283 /*---------------------------------- VLAN -----------------------------------*/
284
285 /**
286 * bond_dev_queue_xmit - Prepare skb for xmit.
287 *
288 * @bond: bond device that got this skb for tx.
289 * @skb: hw accel VLAN tagged skb to transmit
290 * @slave_dev: slave that is supposed to xmit this skbuff
291 */
292 int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
293 struct net_device *slave_dev)
294 {
295 skb->dev = slave_dev;
296
297 BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
298 sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
299 skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
300
301 if (unlikely(netpoll_tx_running(bond->dev)))
302 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
303 else
304 dev_queue_xmit(skb);
305
306 return 0;
307 }
308
309 /*
310 * In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
311 * We don't protect the slave list iteration with a lock because:
312 * a. This operation is performed in IOCTL context,
313 * b. The operation is protected by the RTNL semaphore in the 8021q code,
314 * c. Holding a lock with BH disabled while directly calling a base driver
315 * entry point is generally a BAD idea.
316 *
317 * The design of synchronization/protection for this operation in the 8021q
318 * module is good for one or more VLAN devices over a single physical device
319 * and cannot be extended for a teaming solution like bonding, so there is a
320 * potential race condition here where a net device from the vlan group might
321 * be referenced (either by a base driver or the 8021q code) while it is being
322 * removed from the system. However, it turns out we're not making matters
323 * worse, and if it works for regular VLAN usage it will work here too.
324 */
325
326 /**
327 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
328 * @bond_dev: bonding net device that got called
329 * @vid: vlan id being added
330 */
331 static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
332 __be16 proto, u16 vid)
333 {
334 struct bonding *bond = netdev_priv(bond_dev);
335 struct slave *slave;
336 int res;
337
338 bond_for_each_slave(bond, slave) {
339 res = vlan_vid_add(slave->dev, proto, vid);
340 if (res)
341 goto unwind;
342 }
343
344 return 0;
345
346 unwind:
347 /* unwind from the slave that failed */
348 bond_for_each_slave_continue_reverse(bond, slave)
349 vlan_vid_del(slave->dev, proto, vid);
350
351 return res;
352 }
353
354 /**
355 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
356 * @bond_dev: bonding net device that got called
357 * @vid: vlan id being removed
358 */
359 static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
360 __be16 proto, u16 vid)
361 {
362 struct bonding *bond = netdev_priv(bond_dev);
363 struct slave *slave;
364
365 bond_for_each_slave(bond, slave)
366 vlan_vid_del(slave->dev, proto, vid);
367
368 if (bond_is_lb(bond))
369 bond_alb_clear_vlan(bond, vid);
370
371 return 0;
372 }
373
374 /*------------------------------- Link status -------------------------------*/
375
376 /*
377 * Set the carrier state for the master according to the state of its
378 * slaves. If any slaves are up, the master is up. In 802.3ad mode,
379 * do special 802.3ad magic.
380 *
381 * Returns zero if carrier state does not change, nonzero if it does.
382 */
383 static int bond_set_carrier(struct bonding *bond)
384 {
385 struct slave *slave;
386
387 if (list_empty(&bond->slave_list))
388 goto down;
389
390 if (bond->params.mode == BOND_MODE_8023AD)
391 return bond_3ad_set_carrier(bond);
392
393 bond_for_each_slave(bond, slave) {
394 if (slave->link == BOND_LINK_UP) {
395 if (!netif_carrier_ok(bond->dev)) {
396 netif_carrier_on(bond->dev);
397 return 1;
398 }
399 return 0;
400 }
401 }
402
403 down:
404 if (netif_carrier_ok(bond->dev)) {
405 netif_carrier_off(bond->dev);
406 return 1;
407 }
408 return 0;
409 }
410
411 /*
412 * Get link speed and duplex from the slave's base driver
413 * using ethtool. If for some reason the call fails or the
414 * values are invalid, leave speed as SPEED_UNKNOWN and
415 * duplex as DUPLEX_UNKNOWN, and return.
416 */
417 static void bond_update_speed_duplex(struct slave *slave)
418 {
419 struct net_device *slave_dev = slave->dev;
420 struct ethtool_cmd ecmd;
421 u32 slave_speed;
422 int res;
423
424 slave->speed = SPEED_UNKNOWN;
425 slave->duplex = DUPLEX_UNKNOWN;
426
427 res = __ethtool_get_settings(slave_dev, &ecmd);
428 if (res < 0)
429 return;
430
431 slave_speed = ethtool_cmd_speed(&ecmd);
432 if (slave_speed == 0 || slave_speed == ((__u32) -1))
433 return;
434
435 switch (ecmd.duplex) {
436 case DUPLEX_FULL:
437 case DUPLEX_HALF:
438 break;
439 default:
440 return;
441 }
442
443 slave->speed = slave_speed;
444 slave->duplex = ecmd.duplex;
445
446 return;
447 }
448
449 /*
450 * if <dev> supports MII link status reporting, check its link status.
451 *
452 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
453 * depending upon the setting of the use_carrier parameter.
454 *
455 * Return either BMSR_LSTATUS, meaning that the link is up (or we
456 * can't tell and just pretend it is), or 0, meaning that the link is
457 * down.
458 *
459 * If reporting is non-zero, instead of faking link up, return -1 if
460 * both ETHTOOL and MII ioctls fail (meaning the device does not
461 * support them). If use_carrier is set, return whatever it says.
462 * It'd be nice if there was a good way to tell if a driver supports
463 * netif_carrier, but there really isn't.
464 */
465 static int bond_check_dev_link(struct bonding *bond,
466 struct net_device *slave_dev, int reporting)
467 {
468 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
469 int (*ioctl)(struct net_device *, struct ifreq *, int);
470 struct ifreq ifr;
471 struct mii_ioctl_data *mii;
472
473 if (!reporting && !netif_running(slave_dev))
474 return 0;
475
476 if (bond->params.use_carrier)
477 return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
478
479 /* Try to get link status using Ethtool first. */
480 if (slave_dev->ethtool_ops->get_link)
481 return slave_dev->ethtool_ops->get_link(slave_dev) ?
482 BMSR_LSTATUS : 0;
483
484 /* Ethtool can't be used, fallback to MII ioctls. */
485 ioctl = slave_ops->ndo_do_ioctl;
486 if (ioctl) {
487 /* TODO: set pointer to correct ioctl on a per team member */
488 /* basis to make this more efficient. That is, once */
489 /* we determine the correct ioctl, we will always */
490 /* call it and not the others for that team */
491 /* member. */
492
493 /*
494 * We cannot assume that SIOCGMIIPHY will also read a
495 * register; not all network drivers (e.g., e100)
496 * support that.
497 */
498
499 /* Yes, the mii is overlaid on the ifreq.ifr_ifru */
500 strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
501 mii = if_mii(&ifr);
502 if (IOCTL(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
503 mii->reg_num = MII_BMSR;
504 if (IOCTL(slave_dev, &ifr, SIOCGMIIREG) == 0)
505 return mii->val_out & BMSR_LSTATUS;
506 }
507 }
508
509 /*
510 * If reporting, report that either there's no dev->do_ioctl,
511 * or both SIOCGMIIREG and get_link failed (meaning that we
512 * cannot report link status). If not reporting, pretend
513 * we're ok.
514 */
515 return reporting ? -1 : BMSR_LSTATUS;
516 }
517
518 /*----------------------------- Multicast list ------------------------------*/
519
520 /*
521 * Push the promiscuity flag down to appropriate slaves
522 */
523 static int bond_set_promiscuity(struct bonding *bond, int inc)
524 {
525 int err = 0;
526 if (USES_PRIMARY(bond->params.mode)) {
527 /* write lock already acquired */
528 if (bond->curr_active_slave) {
529 err = dev_set_promiscuity(bond->curr_active_slave->dev,
530 inc);
531 }
532 } else {
533 struct slave *slave;
534
535 bond_for_each_slave(bond, slave) {
536 err = dev_set_promiscuity(slave->dev, inc);
537 if (err)
538 return err;
539 }
540 }
541 return err;
542 }
543
544 /*
545 * Push the allmulti flag down to all slaves
546 */
547 static int bond_set_allmulti(struct bonding *bond, int inc)
548 {
549 int err = 0;
550 if (USES_PRIMARY(bond->params.mode)) {
551 /* write lock already acquired */
552 if (bond->curr_active_slave) {
553 err = dev_set_allmulti(bond->curr_active_slave->dev,
554 inc);
555 }
556 } else {
557 struct slave *slave;
558
559 bond_for_each_slave(bond, slave) {
560 err = dev_set_allmulti(slave->dev, inc);
561 if (err)
562 return err;
563 }
564 }
565 return err;
566 }
567
568 /*
569 * Retrieve the list of registered multicast addresses for the bonding
570 * device and retransmit an IGMP JOIN request to the current active
571 * slave.
572 */
573 static void bond_resend_igmp_join_requests(struct bonding *bond)
574 {
575 if (!rtnl_trylock()) {
576 queue_delayed_work(bond->wq, &bond->mcast_work, 1);
577 return;
578 }
579 call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
580 rtnl_unlock();
581
582 /* We use curr_slave_lock to protect against concurrent access to
583 * igmp_retrans from multiple running instances of this function and
584 * bond_change_active_slave
585 */
586 write_lock_bh(&bond->curr_slave_lock);
587 if (bond->igmp_retrans > 1) {
588 bond->igmp_retrans--;
589 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
590 }
591 write_unlock_bh(&bond->curr_slave_lock);
592 }
593
594 static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
595 {
596 struct bonding *bond = container_of(work, struct bonding,
597 mcast_work.work);
598
599 bond_resend_igmp_join_requests(bond);
600 }
601
602 /* Flush bond's hardware addresses from slave
603 */
604 static void bond_hw_addr_flush(struct net_device *bond_dev,
605 struct net_device *slave_dev)
606 {
607 struct bonding *bond = netdev_priv(bond_dev);
608
609 dev_uc_unsync(slave_dev, bond_dev);
610 dev_mc_unsync(slave_dev, bond_dev);
611
612 if (bond->params.mode == BOND_MODE_8023AD) {
613 /* del lacpdu mc addr from mc list */
614 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
615
616 dev_mc_del(slave_dev, lacpdu_multicast);
617 }
618 }
619
620 /*--------------------------- Active slave change ---------------------------*/
621
622 /* Update the hardware address list and promisc/allmulti for the new and
623 * old active slaves (if any). Modes that are !USES_PRIMARY keep all
624 * slaves up to date at all times; only the USES_PRIMARY modes need to call
625 * this function to swap these settings during a failover.
626 */
627 static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
628 struct slave *old_active)
629 {
630 ASSERT_RTNL();
631
632 if (old_active) {
633 if (bond->dev->flags & IFF_PROMISC)
634 dev_set_promiscuity(old_active->dev, -1);
635
636 if (bond->dev->flags & IFF_ALLMULTI)
637 dev_set_allmulti(old_active->dev, -1);
638
639 bond_hw_addr_flush(bond->dev, old_active->dev);
640 }
641
642 if (new_active) {
643 /* FIXME: Signal errors upstream. */
644 if (bond->dev->flags & IFF_PROMISC)
645 dev_set_promiscuity(new_active->dev, 1);
646
647 if (bond->dev->flags & IFF_ALLMULTI)
648 dev_set_allmulti(new_active->dev, 1);
649
650 netif_addr_lock_bh(bond->dev);
651 dev_uc_sync(new_active->dev, bond->dev);
652 dev_mc_sync(new_active->dev, bond->dev);
653 netif_addr_unlock_bh(bond->dev);
654 }
655 }
656
657 /**
658 * bond_set_dev_addr - clone slave's address to bond
659 * @bond_dev: bond net device
660 * @slave_dev: slave net device
661 *
662 * Should be called with RTNL held.
663 */
664 static void bond_set_dev_addr(struct net_device *bond_dev,
665 struct net_device *slave_dev)
666 {
667 pr_debug("bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
668 bond_dev, slave_dev, slave_dev->addr_len);
669 memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
670 bond_dev->addr_assign_type = NET_ADDR_STOLEN;
671 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
672 }
673
674 /*
675 * bond_do_fail_over_mac
676 *
677 * Perform special MAC address swapping for fail_over_mac settings
678 *
679 * Called with RTNL, bond->lock for read, curr_slave_lock for write_bh.
680 */
681 static void bond_do_fail_over_mac(struct bonding *bond,
682 struct slave *new_active,
683 struct slave *old_active)
684 __releases(&bond->curr_slave_lock)
685 __releases(&bond->lock)
686 __acquires(&bond->lock)
687 __acquires(&bond->curr_slave_lock)
688 {
689 u8 tmp_mac[ETH_ALEN];
690 struct sockaddr saddr;
691 int rv;
692
693 switch (bond->params.fail_over_mac) {
694 case BOND_FOM_ACTIVE:
695 if (new_active) {
696 write_unlock_bh(&bond->curr_slave_lock);
697 read_unlock(&bond->lock);
698 bond_set_dev_addr(bond->dev, new_active->dev);
699 read_lock(&bond->lock);
700 write_lock_bh(&bond->curr_slave_lock);
701 }
702 break;
703 case BOND_FOM_FOLLOW:
704 /*
705 * if new_active && old_active, swap them
706 * if just old_active, do nothing (going to no active slave)
707 * if just new_active, set new_active to bond's MAC
708 */
709 if (!new_active)
710 return;
711
712 write_unlock_bh(&bond->curr_slave_lock);
713 read_unlock(&bond->lock);
714
715 if (old_active) {
716 memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
717 memcpy(saddr.sa_data, old_active->dev->dev_addr,
718 ETH_ALEN);
719 saddr.sa_family = new_active->dev->type;
720 } else {
721 memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
722 saddr.sa_family = bond->dev->type;
723 }
724
725 rv = dev_set_mac_address(new_active->dev, &saddr);
726 if (rv) {
727 pr_err("%s: Error %d setting MAC of slave %s\n",
728 bond->dev->name, -rv, new_active->dev->name);
729 goto out;
730 }
731
732 if (!old_active)
733 goto out;
734
735 memcpy(saddr.sa_data, tmp_mac, ETH_ALEN);
736 saddr.sa_family = old_active->dev->type;
737
738 rv = dev_set_mac_address(old_active->dev, &saddr);
739 if (rv)
740 pr_err("%s: Error %d setting MAC of slave %s\n",
741 bond->dev->name, -rv, new_active->dev->name);
742 out:
743 read_lock(&bond->lock);
744 write_lock_bh(&bond->curr_slave_lock);
745 break;
746 default:
747 pr_err("%s: bond_do_fail_over_mac impossible: bad policy %d\n",
748 bond->dev->name, bond->params.fail_over_mac);
749 break;
750 }
751
752 }
753
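/* Decide whether the primary slave should replace the current active slave,
 * honouring force_primary and the primary_reselect policy
 * (always/better/failure).
 */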
754 static bool bond_should_change_active(struct bonding *bond)
755 {
756 struct slave *prim = bond->primary_slave;
757 struct slave *curr = bond->curr_active_slave;
758
759 if (!prim || !curr || curr->link != BOND_LINK_UP)
760 return true;
761 if (bond->force_primary) {
762 bond->force_primary = false;
763 return true;
764 }
765 if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
766 (prim->speed < curr->speed ||
767 (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
768 return false;
769 if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
770 return false;
771 return true;
772 }
773
774 /**
775 * bond_find_best_slave - select the best available slave to be the active one
776 * @bond: our bonding struct
777 *
778 * Warning: Caller must hold curr_slave_lock for writing.
779 */
780 static struct slave *bond_find_best_slave(struct bonding *bond)
781 {
782 struct slave *new_active, *old_active;
783 struct slave *bestslave = NULL;
784 int mintime = bond->params.updelay;
785 int i;
786
787 new_active = bond->curr_active_slave;
788
789 if (!new_active) { /* there were no active slaves left */
790 new_active = bond_first_slave(bond);
791 if (!new_active)
792 return NULL; /* still no slave, return NULL */
793 }
794
795 if ((bond->primary_slave) &&
796 bond->primary_slave->link == BOND_LINK_UP &&
797 bond_should_change_active(bond)) {
798 new_active = bond->primary_slave;
799 }
800
801 /* remember where to stop iterating over the slaves */
802 old_active = new_active;
803
804 bond_for_each_slave_from(bond, new_active, i, old_active) {
805 if (new_active->link == BOND_LINK_UP) {
806 return new_active;
807 } else if (new_active->link == BOND_LINK_BACK &&
808 IS_UP(new_active->dev)) {
809 /* link up, but waiting for stabilization */
810 if (new_active->delay < mintime) {
811 mintime = new_active->delay;
812 bestslave = new_active;
813 }
814 }
815 }
816
817 return bestslave;
818 }
819
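/* Return true when peer notifications (gratuitous ARP/unsolicited NA) are
 * pending and the current active slave is ready to send them, i.e. it is
 * not still waiting for a linkwatch event.
 */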
820 static bool bond_should_notify_peers(struct bonding *bond)
821 {
822 struct slave *slave = bond->curr_active_slave;
823
824 pr_debug("bond_should_notify_peers: bond %s slave %s\n",
825 bond->dev->name, slave ? slave->dev->name : "NULL");
826
827 if (!slave || !bond->send_peer_notif ||
828 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
829 return false;
830
831 return true;
832 }
833
834 /**
835 * bond_change_active_slave - change the active slave into the specified one
836 * @bond: our bonding struct
837 * @new: the new slave to make the active one
838 *
839 * Set the new slave to the bond's settings and unset them on the old
840 * curr_active_slave.
841 * Settings include flags, mc-list, promiscuity, allmulti, etc.
842 *
843 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
844 * because it is apparently the best available slave we have, even though its
845 * updelay hasn't timed out yet.
846 *
847 * If new_active is not NULL, caller must hold bond->lock for read and
848 * curr_slave_lock for write_bh.
849 */
850 void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
851 {
852 struct slave *old_active = bond->curr_active_slave;
853
854 if (old_active == new_active)
855 return;
856
857 if (new_active) {
858 new_active->jiffies = jiffies;
859
860 if (new_active->link == BOND_LINK_BACK) {
861 if (USES_PRIMARY(bond->params.mode)) {
862 pr_info("%s: making interface %s the new active one %d ms earlier.\n",
863 bond->dev->name, new_active->dev->name,
864 (bond->params.updelay - new_active->delay) * bond->params.miimon);
865 }
866
867 new_active->delay = 0;
868 new_active->link = BOND_LINK_UP;
869
870 if (bond->params.mode == BOND_MODE_8023AD)
871 bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
872
873 if (bond_is_lb(bond))
874 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
875 } else {
876 if (USES_PRIMARY(bond->params.mode)) {
877 pr_info("%s: making interface %s the new active one.\n",
878 bond->dev->name, new_active->dev->name);
879 }
880 }
881 }
882
883 if (USES_PRIMARY(bond->params.mode))
884 bond_hw_addr_swap(bond, new_active, old_active);
885
886 if (bond_is_lb(bond)) {
887 bond_alb_handle_active_change(bond, new_active);
888 if (old_active)
889 bond_set_slave_inactive_flags(old_active);
890 if (new_active)
891 bond_set_slave_active_flags(new_active);
892 } else {
893 rcu_assign_pointer(bond->curr_active_slave, new_active);
894 }
895
896 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
897 if (old_active)
898 bond_set_slave_inactive_flags(old_active);
899
900 if (new_active) {
901 bool should_notify_peers = false;
902
903 bond_set_slave_active_flags(new_active);
904
905 if (bond->params.fail_over_mac)
906 bond_do_fail_over_mac(bond, new_active,
907 old_active);
908
909 if (netif_running(bond->dev)) {
910 bond->send_peer_notif =
911 bond->params.num_peer_notif;
912 should_notify_peers =
913 bond_should_notify_peers(bond);
914 }
915
916 write_unlock_bh(&bond->curr_slave_lock);
917 read_unlock(&bond->lock);
918
919 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
920 if (should_notify_peers)
921 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
922 bond->dev);
923
924 read_lock(&bond->lock);
925 write_lock_bh(&bond->curr_slave_lock);
926 }
927 }
928
929 /* resend IGMP joins since active slave has changed or
930 * all were sent on curr_active_slave.
931 * resend only if bond is brought up with the affected
932 * bonding modes and the retransmission is enabled */
933 if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
934 ((USES_PRIMARY(bond->params.mode) && new_active) ||
935 bond->params.mode == BOND_MODE_ROUNDROBIN)) {
936 bond->igmp_retrans = bond->params.resend_igmp;
937 queue_delayed_work(bond->wq, &bond->mcast_work, 1);
938 }
939 }
940
941 /**
942 * bond_select_active_slave - select a new active slave, if needed
943 * @bond: our bonding struct
944 *
945 * This function should be called when one of the following occurs:
946 * - The old curr_active_slave has been released or lost its link.
947 * - The primary_slave has got its link back.
948 * - A slave has got its link back and there's no old curr_active_slave.
949 *
950 * Caller must hold bond->lock for read and curr_slave_lock for write_bh.
951 */
952 void bond_select_active_slave(struct bonding *bond)
953 {
954 struct slave *best_slave;
955 int rv;
956
957 best_slave = bond_find_best_slave(bond);
958 if (best_slave != bond->curr_active_slave) {
959 bond_change_active_slave(bond, best_slave);
960 rv = bond_set_carrier(bond);
961 if (!rv)
962 return;
963
964 if (netif_carrier_ok(bond->dev)) {
965 pr_info("%s: first active interface up!\n",
966 bond->dev->name);
967 } else {
968 pr_info("%s: now running without any active interface!\n",
969 bond->dev->name);
970 }
971 }
972 }
973
974 /*--------------------------- slave list handling ---------------------------*/
975
976 /*
977 * This function attaches the slave to the end of list.
978 *
979 * bond->lock held for writing by caller.
980 */
981 static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
982 {
983 list_add_tail_rcu(&new_slave->list, &bond->slave_list);
984 bond->slave_cnt++;
985 }
986
987 /*
988 * This function detaches the slave from the list.
989 * WARNING: no check is made to verify if the slave effectively
990 * belongs to <bond>.
991 * Nothing is freed on return, structures are just unchained.
992 * If any slave pointer in bond was pointing to <slave>,
993 * it should be changed by the calling function.
994 *
995 * bond->lock held for writing by caller.
996 */
997 static void bond_detach_slave(struct bonding *bond, struct slave *slave)
998 {
999 list_del_rcu(&slave->list);
1000 bond->slave_cnt--;
1001 }
1002
1003 #ifdef CONFIG_NET_POLL_CONTROLLER
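/* Allocate and attach a struct netpoll to the slave so netpoll traffic
 * (e.g. netconsole) directed at the bond can be transmitted through it.
 */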
1004 static inline int slave_enable_netpoll(struct slave *slave)
1005 {
1006 struct netpoll *np;
1007 int err = 0;
1008
1009 np = kzalloc(sizeof(*np), GFP_ATOMIC);
1010 err = -ENOMEM;
1011 if (!np)
1012 goto out;
1013
1014 err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
1015 if (err) {
1016 kfree(np);
1017 goto out;
1018 }
1019 slave->np = np;
1020 out:
1021 return err;
1022 }
1023 static inline void slave_disable_netpoll(struct slave *slave)
1024 {
1025 struct netpoll *np = slave->np;
1026
1027 if (!np)
1028 return;
1029
1030 slave->np = NULL;
1031 __netpoll_free_async(np);
1032 }
1033 static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
1034 {
1035 if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
1036 return false;
1037 if (!slave_dev->netdev_ops->ndo_poll_controller)
1038 return false;
1039 return true;
1040 }
1041
1042 static void bond_poll_controller(struct net_device *bond_dev)
1043 {
1044 }
1045
1046 static void bond_netpoll_cleanup(struct net_device *bond_dev)
1047 {
1048 struct bonding *bond = netdev_priv(bond_dev);
1049 struct slave *slave;
1050
1051 bond_for_each_slave(bond, slave)
1052 if (IS_UP(slave->dev))
1053 slave_disable_netpoll(slave);
1054 }
1055
1056 static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
1057 {
1058 struct bonding *bond = netdev_priv(dev);
1059 struct slave *slave;
1060 int err = 0;
1061
1062 bond_for_each_slave(bond, slave) {
1063 err = slave_enable_netpoll(slave);
1064 if (err) {
1065 bond_netpoll_cleanup(dev);
1066 break;
1067 }
1068 }
1069 return err;
1070 }
1071 #else
1072 static inline int slave_enable_netpoll(struct slave *slave)
1073 {
1074 return 0;
1075 }
1076 static inline void slave_disable_netpoll(struct slave *slave)
1077 {
1078 }
1079 static void bond_netpoll_cleanup(struct net_device *bond_dev)
1080 {
1081 }
1082 #endif
1083
1084 /*---------------------------------- IOCTL ----------------------------------*/
1085
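/* ndo_fix_features handler: restrict the requested feature set to what every
 * slave supports; a bond with no slaves is marked NETIF_F_VLAN_CHALLENGED.
 */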
1086 static netdev_features_t bond_fix_features(struct net_device *dev,
1087 netdev_features_t features)
1088 {
1089 struct bonding *bond = netdev_priv(dev);
1090 netdev_features_t mask;
1091 struct slave *slave;
1092
1093 if (list_empty(&bond->slave_list)) {
1094 /* Disable adding VLANs to empty bond. But why? --mq */
1095 features |= NETIF_F_VLAN_CHALLENGED;
1096 return features;
1097 }
1098
1099 mask = features;
1100 features &= ~NETIF_F_ONE_FOR_ALL;
1101 features |= NETIF_F_ALL_FOR_ALL;
1102
1103 bond_for_each_slave(bond, slave) {
1104 features = netdev_increment_features(features,
1105 slave->dev->features,
1106 mask);
1107 }
1108 features = netdev_add_tso_features(features, mask);
1109
1110 return features;
1111 }
1112
1113 #define BOND_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
1114 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
1115 NETIF_F_HIGHDMA | NETIF_F_LRO)
1116
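/* Recompute the bond's vlan_features, hard_header_len and GSO limits from the
 * common capabilities of all slaves, then have the stack re-evaluate the
 * feature flags via netdev_change_features().
 */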
1117 static void bond_compute_features(struct bonding *bond)
1118 {
1119 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
1120 netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1121 unsigned short max_hard_header_len = ETH_HLEN;
1122 unsigned int gso_max_size = GSO_MAX_SIZE;
1123 struct net_device *bond_dev = bond->dev;
1124 u16 gso_max_segs = GSO_MAX_SEGS;
1125 struct slave *slave;
1126
1127 if (list_empty(&bond->slave_list))
1128 goto done;
1129
1130 bond_for_each_slave(bond, slave) {
1131 vlan_features = netdev_increment_features(vlan_features,
1132 slave->dev->vlan_features, BOND_VLAN_FEATURES);
1133
1134 dst_release_flag &= slave->dev->priv_flags;
1135 if (slave->dev->hard_header_len > max_hard_header_len)
1136 max_hard_header_len = slave->dev->hard_header_len;
1137
1138 gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
1139 gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
1140 }
1141
1142 done:
1143 bond_dev->vlan_features = vlan_features;
1144 bond_dev->hard_header_len = max_hard_header_len;
1145 bond_dev->gso_max_segs = gso_max_segs;
1146 netif_set_gso_max_size(bond_dev, gso_max_size);
1147
1148 flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
1149 bond_dev->priv_flags = flags | dst_release_flag;
1150
1151 netdev_change_features(bond_dev);
1152 }
1153
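/* Copy the L2 characteristics (header ops, type, header length, address
 * length, broadcast address) of a non-Ethernet slave onto the bond device.
 */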
1154 static void bond_setup_by_slave(struct net_device *bond_dev,
1155 struct net_device *slave_dev)
1156 {
1157 bond_dev->header_ops = slave_dev->header_ops;
1158
1159 bond_dev->type = slave_dev->type;
1160 bond_dev->hard_header_len = slave_dev->hard_header_len;
1161 bond_dev->addr_len = slave_dev->addr_len;
1162
1163 memcpy(bond_dev->broadcast, slave_dev->broadcast,
1164 slave_dev->addr_len);
1165 }
1166
1167 /* On bonding slaves other than the currently active slave, suppress
1168 * duplicates except for alb non-mcast/bcast.
1169 */
1170 static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1171 struct slave *slave,
1172 struct bonding *bond)
1173 {
1174 if (bond_is_slave_inactive(slave)) {
1175 if (bond->params.mode == BOND_MODE_ALB &&
1176 skb->pkt_type != PACKET_BROADCAST &&
1177 skb->pkt_type != PACKET_MULTICAST)
1178 return false;
1179 return true;
1180 }
1181 return false;
1182 }
1183
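/* rx_handler installed on every slave: run the bond's recv_probe (if any),
 * suppress duplicate frames on inactive slaves, and re-label accepted frames
 * as received on the bond device.
 */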
1184 static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1185 {
1186 struct sk_buff *skb = *pskb;
1187 struct slave *slave;
1188 struct bonding *bond;
1189 int (*recv_probe)(const struct sk_buff *, struct bonding *,
1190 struct slave *);
1191 int ret = RX_HANDLER_ANOTHER;
1192
1193 skb = skb_share_check(skb, GFP_ATOMIC);
1194 if (unlikely(!skb))
1195 return RX_HANDLER_CONSUMED;
1196
1197 *pskb = skb;
1198
1199 slave = bond_slave_get_rcu(skb->dev);
1200 bond = slave->bond;
1201
1202 if (bond->params.arp_interval)
1203 slave->dev->last_rx = jiffies;
1204
1205 recv_probe = ACCESS_ONCE(bond->recv_probe);
1206 if (recv_probe) {
1207 ret = recv_probe(skb, bond, slave);
1208 if (ret == RX_HANDLER_CONSUMED) {
1209 consume_skb(skb);
1210 return ret;
1211 }
1212 }
1213
1214 if (bond_should_deliver_exact_match(skb, slave, bond)) {
1215 return RX_HANDLER_EXACT;
1216 }
1217
1218 skb->dev = bond->dev;
1219
1220 if (bond->params.mode == BOND_MODE_ALB &&
1221 bond->dev->priv_flags & IFF_BRIDGE_PORT &&
1222 skb->pkt_type == PACKET_HOST) {
1223
1224 if (unlikely(skb_cow_head(skb,
1225 skb->data - skb_mac_header(skb)))) {
1226 kfree_skb(skb);
1227 return RX_HANDLER_CONSUMED;
1228 }
1229 memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
1230 }
1231
1232 return ret;
1233 }
1234
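/* Link the slave below the bond in the upper/lower device graph, mark it with
 * IFF_SLAVE and announce the flag change via rtnetlink.
 */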
1235 static int bond_master_upper_dev_link(struct net_device *bond_dev,
1236 struct net_device *slave_dev)
1237 {
1238 int err;
1239
1240 err = netdev_master_upper_dev_link(slave_dev, bond_dev);
1241 if (err)
1242 return err;
1243 slave_dev->flags |= IFF_SLAVE;
1244 rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
1245 return 0;
1246 }
1247
1248 static void bond_upper_dev_unlink(struct net_device *bond_dev,
1249 struct net_device *slave_dev)
1250 {
1251 netdev_upper_dev_unlink(slave_dev, bond_dev);
1252 slave_dev->flags &= ~IFF_SLAVE;
1253 rtmsg_ifinfo(RTM_NEWLINK, slave_dev, IFF_SLAVE);
1254 }
1255
1256 /* enslave device <slave> to bond device <master> */
1257 int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1258 {
1259 struct bonding *bond = netdev_priv(bond_dev);
1260 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1261 struct slave *new_slave = NULL;
1262 struct sockaddr addr;
1263 int link_reporting;
1264 int res = 0, i;
1265
1266 if (!bond->params.use_carrier &&
1267 slave_dev->ethtool_ops->get_link == NULL &&
1268 slave_ops->ndo_do_ioctl == NULL) {
1269 pr_warning("%s: Warning: no link monitoring support for %s\n",
1270 bond_dev->name, slave_dev->name);
1271 }
1272
1273 /* already enslaved */
1274 if (slave_dev->flags & IFF_SLAVE) {
1275 pr_debug("Error, Device was already enslaved\n");
1276 return -EBUSY;
1277 }
1278
1279 /* vlan challenged mutual exclusion */
1280 /* no need to lock since we're protected by rtnl_lock */
1281 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1282 pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1283 if (vlan_uses_dev(bond_dev)) {
1284 pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
1285 bond_dev->name, slave_dev->name, bond_dev->name);
1286 return -EPERM;
1287 } else {
1288 pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
1289 bond_dev->name, slave_dev->name,
1290 slave_dev->name, bond_dev->name);
1291 }
1292 } else {
1293 pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
1294 }
1295
1296 /*
1297 * Old ifenslave binaries are no longer supported. These can
1298 * be identified with moderate accuracy by the state of the slave:
1299 * the current ifenslave will set the interface down prior to
1300 * enslaving it; the old ifenslave will not.
1301 */
1302 if ((slave_dev->flags & IFF_UP)) {
1303 pr_err("%s is up. This may be due to an out of date ifenslave.\n",
1304 slave_dev->name);
1305 res = -EPERM;
1306 goto err_undo_flags;
1307 }
1308
1309 /* set bonding device ether type by slave - bonding netdevices are
1310 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
1311 * there is a need to override some of the type dependent attribs/funcs.
1312 *
1313 * bond ether type mutual exclusion - don't allow slaves of dissimilar
1314 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
1315 */
1316 if (list_empty(&bond->slave_list)) {
1317 if (bond_dev->type != slave_dev->type) {
1318 pr_debug("%s: change device type from %d to %d\n",
1319 bond_dev->name,
1320 bond_dev->type, slave_dev->type);
1321
1322 res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
1323 bond_dev);
1324 res = notifier_to_errno(res);
1325 if (res) {
1326 pr_err("%s: refused to change device type\n",
1327 bond_dev->name);
1328 res = -EBUSY;
1329 goto err_undo_flags;
1330 }
1331
1332 /* Flush unicast and multicast addresses */
1333 dev_uc_flush(bond_dev);
1334 dev_mc_flush(bond_dev);
1335
1336 if (slave_dev->type != ARPHRD_ETHER)
1337 bond_setup_by_slave(bond_dev, slave_dev);
1338 else {
1339 ether_setup(bond_dev);
1340 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1341 }
1342
1343 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
1344 bond_dev);
1345 }
1346 } else if (bond_dev->type != slave_dev->type) {
1347 pr_err("%s ether type (%d) is different from other slaves (%d), cannot enslave it.\n",
1348 slave_dev->name,
1349 slave_dev->type, bond_dev->type);
1350 res = -EINVAL;
1351 goto err_undo_flags;
1352 }
1353
1354 if (slave_ops->ndo_set_mac_address == NULL) {
1355 if (list_empty(&bond->slave_list)) {
1356 pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
1357 bond_dev->name);
1358 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1359 } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1360 pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
1361 bond_dev->name);
1362 res = -EOPNOTSUPP;
1363 goto err_undo_flags;
1364 }
1365 }
1366
1367 call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
1368
1369 /* If this is the first slave, then we need to set the master's hardware
1370 * address to be the same as the slave's. */
1371 if (list_empty(&bond->slave_list) &&
1372 bond->dev->addr_assign_type == NET_ADDR_RANDOM)
1373 bond_set_dev_addr(bond->dev, slave_dev);
1374
1375 new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
1376 if (!new_slave) {
1377 res = -ENOMEM;
1378 goto err_undo_flags;
1379 }
1380 INIT_LIST_HEAD(&new_slave->list);
1381 /*
1382 * Set the new_slave's queue_id to be zero. Queue ID mapping
1383 * is set via sysfs or module option if desired.
1384 */
1385 new_slave->queue_id = 0;
1386
1387 /* Save slave's original mtu and then set it to match the bond */
1388 new_slave->original_mtu = slave_dev->mtu;
1389 res = dev_set_mtu(slave_dev, bond->dev->mtu);
1390 if (res) {
1391 pr_debug("Error %d calling dev_set_mtu\n", res);
1392 goto err_free;
1393 }
1394
1395 /*
1396 * Save slave's original ("permanent") mac address for modes
1397 * that need it, and for restoring it upon release, and then
1398 * set it to the master's address
1399 */
1400 memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
1401
1402 if (!bond->params.fail_over_mac) {
1403 /*
1404 * Set slave to master's mac address. The application already
1405 * set the master's mac address to that of the first slave
1406 */
1407 memcpy(addr.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
1408 addr.sa_family = slave_dev->type;
1409 res = dev_set_mac_address(slave_dev, &addr);
1410 if (res) {
1411 pr_debug("Error %d calling set_mac_address\n", res);
1412 goto err_restore_mtu;
1413 }
1414 }
1415
1416 res = bond_master_upper_dev_link(bond_dev, slave_dev);
1417 if (res) {
1418 pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
1419 goto err_restore_mac;
1420 }
1421
1422 /* open the slave since the application closed it */
1423 res = dev_open(slave_dev);
1424 if (res) {
1425 pr_debug("Opening slave %s failed\n", slave_dev->name);
1426 goto err_unset_master;
1427 }
1428
1429 new_slave->bond = bond;
1430 new_slave->dev = slave_dev;
1431 slave_dev->priv_flags |= IFF_BONDING;
1432
1433 if (bond_is_lb(bond)) {
1434 /* bond_alb_init_slave() must be called before all other stages since
1435 * it might fail and we do not want to have to undo everything
1436 */
1437 res = bond_alb_init_slave(bond, new_slave);
1438 if (res)
1439 goto err_close;
1440 }
1441
1442 /* If the mode USES_PRIMARY, then the following is handled by
1443 * bond_change_active_slave().
1444 */
1445 if (!USES_PRIMARY(bond->params.mode)) {
1446 /* set promiscuity level to new slave */
1447 if (bond_dev->flags & IFF_PROMISC) {
1448 res = dev_set_promiscuity(slave_dev, 1);
1449 if (res)
1450 goto err_close;
1451 }
1452
1453 /* set allmulti level to new slave */
1454 if (bond_dev->flags & IFF_ALLMULTI) {
1455 res = dev_set_allmulti(slave_dev, 1);
1456 if (res)
1457 goto err_close;
1458 }
1459
1460 netif_addr_lock_bh(bond_dev);
1461
1462 dev_mc_sync_multiple(slave_dev, bond_dev);
1463 dev_uc_sync_multiple(slave_dev, bond_dev);
1464
1465 netif_addr_unlock_bh(bond_dev);
1466 }
1467
1468 if (bond->params.mode == BOND_MODE_8023AD) {
1469 /* add lacpdu mc addr to mc list */
1470 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1471
1472 dev_mc_add(slave_dev, lacpdu_multicast);
1473 }
1474
1475 res = vlan_vids_add_by_dev(slave_dev, bond_dev);
1476 if (res) {
1477 pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
1478 bond_dev->name, slave_dev->name);
1479 goto err_close;
1480 }
1481
1482 write_lock_bh(&bond->lock);
1483
1484 bond_attach_slave(bond, new_slave);
1485
1486 new_slave->delay = 0;
1487 new_slave->link_failure_count = 0;
1488
1489 write_unlock_bh(&bond->lock);
1490
1491 bond_compute_features(bond);
1492
1493 bond_update_speed_duplex(new_slave);
1494
1495 read_lock(&bond->lock);
1496
1497 new_slave->last_arp_rx = jiffies -
1498 (msecs_to_jiffies(bond->params.arp_interval) + 1);
1499 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
1500 new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx;
1501
1502 if (bond->params.miimon && !bond->params.use_carrier) {
1503 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
1504
1505 if ((link_reporting == -1) && !bond->params.arp_interval) {
1506 /*
1507 * miimon is set but a bonded network driver
1508 * does not support ETHTOOL/MII and
1509 * arp_interval is not set. Note: if
1510 * use_carrier is enabled, we will never go
1511 * here (because netif_carrier is always
1512 * supported); thus, we don't need to change
1513 * the messages for netif_carrier.
1514 */
1515 pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
1516 bond_dev->name, slave_dev->name);
1517 } else if (link_reporting == -1) {
1518 /* unable to get link status using mii/ethtool */
1519 pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
1520 bond_dev->name, slave_dev->name);
1521 }
1522 }
1523
1524 /* check for initial state */
1525 if (bond->params.miimon) {
1526 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1527 if (bond->params.updelay) {
1528 new_slave->link = BOND_LINK_BACK;
1529 new_slave->delay = bond->params.updelay;
1530 } else {
1531 new_slave->link = BOND_LINK_UP;
1532 }
1533 } else {
1534 new_slave->link = BOND_LINK_DOWN;
1535 }
1536 } else if (bond->params.arp_interval) {
1537 new_slave->link = (netif_carrier_ok(slave_dev) ?
1538 BOND_LINK_UP : BOND_LINK_DOWN);
1539 } else {
1540 new_slave->link = BOND_LINK_UP;
1541 }
1542
1543 if (new_slave->link != BOND_LINK_DOWN)
1544 new_slave->jiffies = jiffies;
1545 pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
1546 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1547 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1548
1549 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
1550 /* if there is a primary slave, remember it */
1551 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
1552 bond->primary_slave = new_slave;
1553 bond->force_primary = true;
1554 }
1555 }
1556
1557 write_lock_bh(&bond->curr_slave_lock);
1558
1559 switch (bond->params.mode) {
1560 case BOND_MODE_ACTIVEBACKUP:
1561 bond_set_slave_inactive_flags(new_slave);
1562 bond_select_active_slave(bond);
1563 break;
1564 case BOND_MODE_8023AD:
1565 /* in 802.3ad mode, the internal mechanism
1566 * will activate the slaves in the selected
1567 * aggregator
1568 */
1569 bond_set_slave_inactive_flags(new_slave);
1570 /* if this is the first slave */
1571 if (bond_first_slave(bond) == new_slave) {
1572 SLAVE_AD_INFO(new_slave).id = 1;
1573 /* Initialize AD with the number of times that the AD timer is called in 1 second;
1574 * this can be called only after the mac address of the bond is set
1575 */
1576 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
1577 } else {
1578 struct slave *prev_slave;
1579
1580 prev_slave = bond_prev_slave(bond, new_slave);
1581 SLAVE_AD_INFO(new_slave).id =
1582 SLAVE_AD_INFO(prev_slave).id + 1;
1583 }
1584
1585 bond_3ad_bind_slave(new_slave);
1586 break;
1587 case BOND_MODE_TLB:
1588 case BOND_MODE_ALB:
1589 bond_set_active_slave(new_slave);
1590 bond_set_slave_inactive_flags(new_slave);
1591 bond_select_active_slave(bond);
1592 break;
1593 default:
1594 pr_debug("This slave is always active in trunk mode\n");
1595
1596 /* always active in trunk mode */
1597 bond_set_active_slave(new_slave);
1598
1599 /* In trunking mode there is little meaning to curr_active_slave
1600 * anyway (it holds no special properties of the bond device),
1601 * so we can change it without calling change_active_interface()
1602 */
1603 if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
1604 rcu_assign_pointer(bond->curr_active_slave, new_slave);
1605
1606 break;
1607 } /* switch(bond_mode) */
1608
1609 write_unlock_bh(&bond->curr_slave_lock);
1610
1611 bond_set_carrier(bond);
1612
1613 #ifdef CONFIG_NET_POLL_CONTROLLER
1614 slave_dev->npinfo = bond->dev->npinfo;
1615 if (slave_dev->npinfo) {
1616 if (slave_enable_netpoll(new_slave)) {
1617 read_unlock(&bond->lock);
1618 pr_info("Error, %s: master_dev is using netpoll, "
1619 "but new slave device does not support netpoll.\n",
1620 bond_dev->name);
1621 res = -EBUSY;
1622 goto err_detach;
1623 }
1624 }
1625 #endif
1626
1627 read_unlock(&bond->lock);
1628
1629 res = bond_create_slave_symlinks(bond_dev, slave_dev);
1630 if (res)
1631 goto err_detach;
1632
1633 res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
1634 new_slave);
1635 if (res) {
1636 pr_debug("Error %d calling netdev_rx_handler_register\n", res);
1637 goto err_dest_symlinks;
1638 }
1639
1640 pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
1641 bond_dev->name, slave_dev->name,
1642 bond_is_active_slave(new_slave) ? "n active" : " backup",
1643 new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
1644
1645 /* enslave is successful */
1646 return 0;
1647
1648 /* Undo stages on error */
1649 err_dest_symlinks:
1650 bond_destroy_slave_symlinks(bond_dev, slave_dev);
1651
1652 err_detach:
1653 if (!USES_PRIMARY(bond->params.mode))
1654 bond_hw_addr_flush(bond_dev, slave_dev);
1655
1656 vlan_vids_del_by_dev(slave_dev, bond_dev);
1657 write_lock_bh(&bond->lock);
1658 bond_detach_slave(bond, new_slave);
1659 if (bond->primary_slave == new_slave)
1660 bond->primary_slave = NULL;
1661 if (bond->curr_active_slave == new_slave) {
1662 bond_change_active_slave(bond, NULL);
1663 write_unlock_bh(&bond->lock);
1664 read_lock(&bond->lock);
1665 write_lock_bh(&bond->curr_slave_lock);
1666 bond_select_active_slave(bond);
1667 write_unlock_bh(&bond->curr_slave_lock);
1668 read_unlock(&bond->lock);
1669 } else {
1670 write_unlock_bh(&bond->lock);
1671 }
1672 slave_disable_netpoll(new_slave);
1673
1674 err_close:
1675 slave_dev->priv_flags &= ~IFF_BONDING;
1676 dev_close(slave_dev);
1677
1678 err_unset_master:
1679 bond_upper_dev_unlink(bond_dev, slave_dev);
1680
1681 err_restore_mac:
1682 if (!bond->params.fail_over_mac) {
1683 /* XXX TODO - fom follow mode needs to change master's
1684 * MAC if this slave's MAC is in use by the bond, or at
1685 * least print a warning.
1686 */
1687 memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
1688 addr.sa_family = slave_dev->type;
1689 dev_set_mac_address(slave_dev, &addr);
1690 }
1691
1692 err_restore_mtu:
1693 dev_set_mtu(slave_dev, new_slave->original_mtu);
1694
1695 err_free:
1696 kfree(new_slave);
1697
1698 err_undo_flags:
1699 bond_compute_features(bond);
1700 /* Enslave of first slave has failed and we need to fix master's mac */
1701 if (list_empty(&bond->slave_list) &&
1702 ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
1703 eth_hw_addr_random(bond_dev);
1704
1705 return res;
1706 }
1707
1708 /*
1709 * Try to release the slave device <slave> from the bond device <master>
1710 * It is legal to access curr_active_slave without a lock because the entire function
1711 * is write-locked. If "all" is true it means that the function is being called
1712 * while destroying a bond interface and all slaves are being released.
1713 *
1714 * The rules for slave state should be:
1715 * for Active/Backup:
1716 * Active stays on, all backups go down
1717 * for Bonded connections:
1718 * The first up interface should be left on and all others downed.
1719 */
1720 static int __bond_release_one(struct net_device *bond_dev,
1721 struct net_device *slave_dev,
1722 bool all)
1723 {
1724 struct bonding *bond = netdev_priv(bond_dev);
1725 struct slave *slave, *oldcurrent;
1726 struct sockaddr addr;
1727 int old_flags = bond_dev->flags;
1728 netdev_features_t old_features = bond_dev->features;
1729
1730 /* slave is not a slave or master is not master of this slave */
1731 if (!(slave_dev->flags & IFF_SLAVE) ||
1732 !netdev_has_upper_dev(slave_dev, bond_dev)) {
1733 pr_err("%s: Error: cannot release %s.\n",
1734 bond_dev->name, slave_dev->name);
1735 return -EINVAL;
1736 }
1737
1738 block_netpoll_tx();
1739 write_lock_bh(&bond->lock);
1740
1741 slave = bond_get_slave_by_dev(bond, slave_dev);
1742 if (!slave) {
1743 /* not a slave of this bond */
1744 pr_info("%s: %s not enslaved\n",
1745 bond_dev->name, slave_dev->name);
1746 write_unlock_bh(&bond->lock);
1747 unblock_netpoll_tx();
1748 return -EINVAL;
1749 }
1750
1751 write_unlock_bh(&bond->lock);
1752 /* unregister rx_handler early so bond_handle_frame wouldn't be called
1753 * for this slave anymore.
1754 */
1755 netdev_rx_handler_unregister(slave_dev);
1756 write_lock_bh(&bond->lock);
1757
1758 /* Inform AD package of unbinding of slave. */
1759 if (bond->params.mode == BOND_MODE_8023AD) {
1760 /* must be called before the slave is
1761 * detached from the list
1762 */
1763 bond_3ad_unbind_slave(slave);
1764 }
1765
1766 pr_info("%s: releasing %s interface %s\n",
1767 bond_dev->name,
1768 bond_is_active_slave(slave) ? "active" : "backup",
1769 slave_dev->name);
1770
1771 oldcurrent = bond->curr_active_slave;
1772
1773 bond->current_arp_slave = NULL;
1774
1775 /* release the slave from its bond */
1776 bond_detach_slave(bond, slave);
1777
1778 if (!all && !bond->params.fail_over_mac) {
1779 if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
1780 !list_empty(&bond->slave_list))
1781 pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
1782 bond_dev->name, slave_dev->name,
1783 slave->perm_hwaddr,
1784 bond_dev->name, slave_dev->name);
1785 }
1786
1787 if (bond->primary_slave == slave)
1788 bond->primary_slave = NULL;
1789
1790 if (oldcurrent == slave)
1791 bond_change_active_slave(bond, NULL);
1792
1793 if (bond_is_lb(bond)) {
1794 /* Must be called only after the slave has been
1795 * detached from the list and the curr_active_slave
1796 * has been cleared (if our_slave == old_current),
1797 * but before a new active slave is selected.
1798 */
1799 write_unlock_bh(&bond->lock);
1800 bond_alb_deinit_slave(bond, slave);
1801 write_lock_bh(&bond->lock);
1802 }
1803
1804 if (all) {
1805 rcu_assign_pointer(bond->curr_active_slave, NULL);
1806 } else if (oldcurrent == slave) {
1807 /*
1808 * Note that we hold RTNL over this sequence, so there
1809 * is no concern that another slave add/remove event
1810 * will interfere.
1811 */
1812 write_unlock_bh(&bond->lock);
1813 read_lock(&bond->lock);
1814 write_lock_bh(&bond->curr_slave_lock);
1815
1816 bond_select_active_slave(bond);
1817
1818 write_unlock_bh(&bond->curr_slave_lock);
1819 read_unlock(&bond->lock);
1820 write_lock_bh(&bond->lock);
1821 }
1822
1823 if (list_empty(&bond->slave_list)) {
1824 bond_set_carrier(bond);
1825 eth_hw_addr_random(bond_dev);
1826
1827 if (vlan_uses_dev(bond_dev)) {
1828 pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
1829 bond_dev->name, bond_dev->name);
1830 pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
1831 bond_dev->name);
1832 }
1833 }
1834
1835 write_unlock_bh(&bond->lock);
1836 unblock_netpoll_tx();
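/* Make sure any RCU readers that might still see the detached slave
 * have finished before we continue tearing it down below.
 */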
1837 synchronize_rcu();
1838
1839 if (list_empty(&bond->slave_list)) {
1840 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
1841 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
1842 }
1843
1844 bond_compute_features(bond);
1845 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
1846 (old_features & NETIF_F_VLAN_CHALLENGED))
1847 pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
1848 bond_dev->name, slave_dev->name, bond_dev->name);
1849
1850 /* must do this from outside any spinlocks */
1851 bond_destroy_slave_symlinks(bond_dev, slave_dev);
1852
1853 vlan_vids_del_by_dev(slave_dev, bond_dev);
1854
1855 /* If the mode USES_PRIMARY, then this case was handled above by
1856 * bond_change_active_slave(..., NULL)
1857 */
1858 if (!USES_PRIMARY(bond->params.mode)) {
1859 /* unset promiscuity level from slave
1860 * NOTE: The NETDEV_CHANGEADDR call above may change the value
1861 * of the IFF_PROMISC flag in the bond_dev, but we need the
1862 * value of that flag before that change, as that was the value
1863 * when this slave was attached, so we cache at the start of the
1864 * function and use it here. Same goes for ALLMULTI below
1865 */
1866 if (old_flags & IFF_PROMISC)
1867 dev_set_promiscuity(slave_dev, -1);
1868
1869 /* unset allmulti level from slave */
1870 if (old_flags & IFF_ALLMULTI)
1871 dev_set_allmulti(slave_dev, -1);
1872
1873 bond_hw_addr_flush(bond_dev, slave_dev);
1874 }
1875
1876 bond_upper_dev_unlink(bond_dev, slave_dev);
1877
1878 slave_disable_netpoll(slave);
1879
1880 /* close slave before restoring its mac address */
1881 dev_close(slave_dev);
1882
1883 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1884 /* restore original ("permanent") mac address */
1885 memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
1886 addr.sa_family = slave_dev->type;
1887 dev_set_mac_address(slave_dev, &addr);
1888 }
1889
1890 dev_set_mtu(slave_dev, slave->original_mtu);
1891
1892 slave_dev->priv_flags &= ~IFF_BONDING;
1893
1894 kfree(slave);
1895
1896 return 0; /* deletion OK */
1897 }
1898
1899 /* A wrapper used because of ndo_del_link */
1900 int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1901 {
1902 return __bond_release_one(bond_dev, slave_dev, false);
1903 }
1904
1905 /*
1906 * First release a slave and then destroy the bond if no more slaves are left.
1907 * Must be under rtnl_lock when this function is called.
1908 */
1909 static int bond_release_and_destroy(struct net_device *bond_dev,
1910 struct net_device *slave_dev)
1911 {
1912 struct bonding *bond = netdev_priv(bond_dev);
1913 int ret;
1914
1915 ret = bond_release(bond_dev, slave_dev);
1916 if (ret == 0 && list_empty(&bond->slave_list)) {
1917 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1918 pr_info("%s: destroying bond %s.\n",
1919 bond_dev->name, bond_dev->name);
1920 unregister_netdevice(bond_dev);
1921 }
1922 return ret;
1923 }
1924
1925 /*
1926 * This function changes the active slave to slave <slave_dev>.
1927 * It returns -EINVAL in the following cases:
1928 * - <slave_dev> is not found in the list.
1929 * - There is no active slave.
1930 * - The link state of <slave_dev> is not BOND_LINK_UP.
1931 * - <slave_dev> is not running.
1932 * In these cases, this function does nothing.
1933 * If <slave_dev> is already the active slave, nothing is done and 0 is returned.
1934 * In the other cases, the curr_active_slave pointer is changed and 0 is returned.
1935 */
1936 static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_device *slave_dev)
1937 {
1938 struct bonding *bond = netdev_priv(bond_dev);
1939 struct slave *old_active = NULL;
1940 struct slave *new_active = NULL;
1941 int res = 0;
1942
1943 if (!USES_PRIMARY(bond->params.mode))
1944 return -EINVAL;
1945
1946 /* Verify that bond_dev is indeed the master of slave_dev */
1947 if (!(slave_dev->flags & IFF_SLAVE) ||
1948 !netdev_has_upper_dev(slave_dev, bond_dev))
1949 return -EINVAL;
1950
1951 read_lock(&bond->lock);
1952
1953 old_active = bond->curr_active_slave;
1954 new_active = bond_get_slave_by_dev(bond, slave_dev);
1955 /*
1956 * Changing to the current active: do nothing; return success.
1957 */
1958 if (new_active && new_active == old_active) {
1959 read_unlock(&bond->lock);
1960 return 0;
1961 }
1962
1963 if (new_active &&
1964 old_active &&
1965 new_active->link == BOND_LINK_UP &&
1966 IS_UP(new_active->dev)) {
1967 block_netpoll_tx();
1968 write_lock_bh(&bond->curr_slave_lock);
1969 bond_change_active_slave(bond, new_active);
1970 write_unlock_bh(&bond->curr_slave_lock);
1971 unblock_netpoll_tx();
1972 } else
1973 res = -EINVAL;
1974
1975 read_unlock(&bond->lock);
1976
1977 return res;
1978 }
1979
1980 static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
1981 {
1982 struct bonding *bond = netdev_priv(bond_dev);
1983
1984 info->bond_mode = bond->params.mode;
1985 info->miimon = bond->params.miimon;
1986
1987 read_lock(&bond->lock);
1988 info->num_slaves = bond->slave_cnt;
1989 read_unlock(&bond->lock);
1990
1991 return 0;
1992 }
1993
1994 static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
1995 {
1996 struct bonding *bond = netdev_priv(bond_dev);
1997 int i = 0, res = -ENODEV;
1998 struct slave *slave;
1999
2000 read_lock(&bond->lock);
2001 bond_for_each_slave(bond, slave) {
2002 if (i++ == (int)info->slave_id) {
2003 res = 0;
2004 strcpy(info->slave_name, slave->dev->name);
2005 info->link = slave->link;
2006 info->state = bond_slave_state(slave);
2007 info->link_failure_count = slave->link_failure_count;
2008 break;
2009 }
2010 }
2011 read_unlock(&bond->lock);
2012
2013 return res;
2014 }
2015
2016 /*-------------------------------- Monitoring -------------------------------*/
2017
2018
2019 static int bond_miimon_inspect(struct bonding *bond)
2020 {
2021 int link_state, commit = 0;
2022 struct slave *slave;
2023 bool ignore_updelay;
2024
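/* If the bond has no active slave, ignore updelay so the first link
 * that comes back up can be used immediately.
 */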
2025 ignore_updelay = !bond->curr_active_slave;
2026
2027 bond_for_each_slave(bond, slave) {
2028 slave->new_link = BOND_LINK_NOCHANGE;
2029
2030 link_state = bond_check_dev_link(bond, slave->dev, 0);
2031
2032 switch (slave->link) {
2033 case BOND_LINK_UP:
2034 if (link_state)
2035 continue;
2036
2037 slave->link = BOND_LINK_FAIL;
2038 slave->delay = bond->params.downdelay;
2039 if (slave->delay) {
2040 pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
2041 bond->dev->name,
2042 (bond->params.mode ==
2043 BOND_MODE_ACTIVEBACKUP) ?
2044 (bond_is_active_slave(slave) ?
2045 "active " : "backup ") : "",
2046 slave->dev->name,
2047 bond->params.downdelay * bond->params.miimon);
2048 }
2049 /*FALLTHRU*/
2050 case BOND_LINK_FAIL:
2051 if (link_state) {
2052 /*
2053 * recovered before downdelay expired
2054 */
2055 slave->link = BOND_LINK_UP;
2056 slave->jiffies = jiffies;
2057 pr_info("%s: link status up again after %d ms for interface %s.\n",
2058 bond->dev->name,
2059 (bond->params.downdelay - slave->delay) *
2060 bond->params.miimon,
2061 slave->dev->name);
2062 continue;
2063 }
2064
2065 if (slave->delay <= 0) {
2066 slave->new_link = BOND_LINK_DOWN;
2067 commit++;
2068 continue;
2069 }
2070
2071 slave->delay--;
2072 break;
2073
2074 case BOND_LINK_DOWN:
2075 if (!link_state)
2076 continue;
2077
2078 slave->link = BOND_LINK_BACK;
2079 slave->delay = bond->params.updelay;
2080
2081 if (slave->delay) {
2082 pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
2083 bond->dev->name, slave->dev->name,
2084 ignore_updelay ? 0 :
2085 bond->params.updelay *
2086 bond->params.miimon);
2087 }
2088 /*FALLTHRU*/
2089 case BOND_LINK_BACK:
2090 if (!link_state) {
2091 slave->link = BOND_LINK_DOWN;
2092 pr_info("%s: link status down again after %d ms for interface %s.\n",
2093 bond->dev->name,
2094 (bond->params.updelay - slave->delay) *
2095 bond->params.miimon,
2096 slave->dev->name);
2097
2098 continue;
2099 }
2100
2101 if (ignore_updelay)
2102 slave->delay = 0;
2103
2104 if (slave->delay <= 0) {
2105 slave->new_link = BOND_LINK_UP;
2106 commit++;
2107 ignore_updelay = false;
2108 continue;
2109 }
2110
2111 slave->delay--;
2112 break;
2113 }
2114 }
2115
2116 return commit;
2117 }
2118
2119 static void bond_miimon_commit(struct bonding *bond)
2120 {
2121 struct slave *slave;
2122
2123 bond_for_each_slave(bond, slave) {
2124 switch (slave->new_link) {
2125 case BOND_LINK_NOCHANGE:
2126 continue;
2127
2128 case BOND_LINK_UP:
2129 slave->link = BOND_LINK_UP;
2130 slave->jiffies = jiffies;
2131
2132 if (bond->params.mode == BOND_MODE_8023AD) {
2133 /* prevent it from being the active one */
2134 bond_set_backup_slave(slave);
2135 } else if (bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
2136 /* make it immediately active */
2137 bond_set_active_slave(slave);
2138 } else if (slave != bond->primary_slave) {
2139 /* prevent it from being the active one */
2140 bond_set_backup_slave(slave);
2141 }
2142
2143 pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
2144 bond->dev->name, slave->dev->name,
2145 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2146 slave->duplex ? "full" : "half");
2147
2148 /* notify ad that the link status has changed */
2149 if (bond->params.mode == BOND_MODE_8023AD)
2150 bond_3ad_handle_link_change(slave, BOND_LINK_UP);
2151
2152 if (bond_is_lb(bond))
2153 bond_alb_handle_link_change(bond, slave,
2154 BOND_LINK_UP);
2155
2156 if (!bond->curr_active_slave ||
2157 (slave == bond->primary_slave))
2158 goto do_failover;
2159
2160 continue;
2161
2162 case BOND_LINK_DOWN:
2163 if (slave->link_failure_count < UINT_MAX)
2164 slave->link_failure_count++;
2165
2166 slave->link = BOND_LINK_DOWN;
2167
2168 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
2169 bond->params.mode == BOND_MODE_8023AD)
2170 bond_set_slave_inactive_flags(slave);
2171
2172 pr_info("%s: link status definitely down for interface %s, disabling it\n",
2173 bond->dev->name, slave->dev->name);
2174
2175 if (bond->params.mode == BOND_MODE_8023AD)
2176 bond_3ad_handle_link_change(slave,
2177 BOND_LINK_DOWN);
2178
2179 if (bond_is_lb(bond))
2180 bond_alb_handle_link_change(bond, slave,
2181 BOND_LINK_DOWN);
2182
2183 if (slave == bond->curr_active_slave)
2184 goto do_failover;
2185
2186 continue;
2187
2188 default:
2189 pr_err("%s: invalid new link %d on slave %s\n",
2190 bond->dev->name, slave->new_link,
2191 slave->dev->name);
2192 slave->new_link = BOND_LINK_NOCHANGE;
2193
2194 continue;
2195 }
2196
2197 do_failover:
2198 ASSERT_RTNL();
2199 block_netpoll_tx();
2200 write_lock_bh(&bond->curr_slave_lock);
2201 bond_select_active_slave(bond);
2202 write_unlock_bh(&bond->curr_slave_lock);
2203 unblock_netpoll_tx();
2204 }
2205
2206 bond_set_carrier(bond);
2207 }
2208
2209 /*
2210 * bond_mii_monitor
2211 *
2212 * Really a wrapper that splits the mii monitor into two phases: an
2213 * inspection, then (if inspection indicates something needs to be done)
2214 * an acquisition of appropriate locks followed by a commit phase to
2215 * implement whatever link state changes are indicated.
2216 */
2217 void bond_mii_monitor(struct work_struct *work)
2218 {
2219 struct bonding *bond = container_of(work, struct bonding,
2220 mii_work.work);
2221 bool should_notify_peers = false;
2222 unsigned long delay;
2223
2224 read_lock(&bond->lock);
2225
2226 delay = msecs_to_jiffies(bond->params.miimon);
2227
2228 if (list_empty(&bond->slave_list))
2229 goto re_arm;
2230
2231 should_notify_peers = bond_should_notify_peers(bond);
2232
2233 if (bond_miimon_inspect(bond)) {
2234 read_unlock(&bond->lock);
2235
2236 /* Race avoidance with bond_close cancel of workqueue */
2237 if (!rtnl_trylock()) {
2238 read_lock(&bond->lock);
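/* re-arm after a single jiffy so the pending commit is retried soon */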
2239 delay = 1;
2240 should_notify_peers = false;
2241 goto re_arm;
2242 }
2243
2244 read_lock(&bond->lock);
2245
2246 bond_miimon_commit(bond);
2247
2248 read_unlock(&bond->lock);
2249 rtnl_unlock(); /* might sleep, hold no other locks */
2250 read_lock(&bond->lock);
2251 }
2252
2253 re_arm:
2254 if (bond->params.miimon)
2255 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2256
2257 read_unlock(&bond->lock);
2258
2259 if (should_notify_peers) {
2260 if (!rtnl_trylock())
2261 return;
2262 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2263 rtnl_unlock();
2264 }
2265 }
2266
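/* Returns true if @ip is configured on the bond device itself or on any
 * device stacked on top of it (e.g. a VLAN interface).
 */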
2267 static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2268 {
2269 struct net_device *upper;
2270 struct list_head *iter;
2271 bool ret = false;
2272
2273 if (ip == bond_confirm_addr(bond->dev, 0, ip))
2274 return true;
2275
2276 rcu_read_lock();
2277 netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
2278 if (ip == bond_confirm_addr(upper, 0, ip)) {
2279 ret = true;
2280 break;
2281 }
2282 }
2283 rcu_read_unlock();
2284
2285 return ret;
2286 }
2287
2288 /*
2289 * We go to the (large) trouble of VLAN tagging ARP frames because
2290 * switches in VLAN mode (especially if ports are configured as
2291 * "native" to a VLAN) might not pass non-tagged frames.
2292 */
2293 static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ip, __be32 src_ip, unsigned short vlan_id)
2294 {
2295 struct sk_buff *skb;
2296
2297 pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
2298 slave_dev->name, &dest_ip, &src_ip, vlan_id);
2299
2300 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2301 NULL, slave_dev->dev_addr, NULL);
2302
2303 if (!skb) {
2304 pr_err("ARP packet allocation failed\n");
2305 return;
2306 }
2307 if (vlan_id) {
2308 skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
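/* on failure vlan_put_tag() has already freed the skb, so just bail out */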
2309 if (!skb) {
2310 pr_err("failed to insert VLAN tag\n");
2311 return;
2312 }
2313 }
2314 arp_xmit(skb);
2315 }
2316
2317
2318 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2319 {
2320 struct net_device *upper, *vlan_upper;
2321 struct list_head *iter, *vlan_iter;
2322 struct rtable *rt;
2323 __be32 *targets = bond->params.arp_targets, addr;
2324 int i, vlan_id;
2325
2326 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2327 pr_debug("basa: target %pI4\n", &targets[i]);
2328
2329 /* Find out through which dev should the packet go */
2330 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2331 RTO_ONLINK, 0);
2332 if (IS_ERR(rt)) {
2333 pr_debug("%s: no route to arp_ip_target %pI4\n",
2334 bond->dev->name, &targets[i]);
2335 continue;
2336 }
2337
2338 vlan_id = 0;
2339
2340 /* bond device itself */
2341 if (rt->dst.dev == bond->dev)
2342 goto found;
2343
2344 rcu_read_lock();
2345 /* first we search only for vlan devices. for every vlan
2346 * found we verify its upper dev list, searching for the
2347 * rt->dst.dev. If found we save the tag of the vlan and
2348 * proceed to send the packet.
2349 *
2350 * TODO: QinQ?
2351 */
2352 netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
2353 if (!is_vlan_dev(vlan_upper))
2354 continue;
2355 netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
2356 if (upper == rt->dst.dev) {
2357 vlan_id = vlan_dev_vlan_id(vlan_upper);
2358 rcu_read_unlock();
2359 goto found;
2360 }
2361 }
2362 }
2363
2364 /* if the device we're looking for is not on top of any of
2365 * our upper vlans, then just search for any dev that
2366 * matches, and in case it's a vlan - save the id
2367 */
2368 netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
2369 if (upper == rt->dst.dev) {
2370 /* if it's a vlan - get its VID */
2371 if (is_vlan_dev(upper))
2372 vlan_id = vlan_dev_vlan_id(upper);
2373
2374 rcu_read_unlock();
2375 goto found;
2376 }
2377 }
2378 rcu_read_unlock();
2379
2380 /* Not our device - skip */
2381 pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
2382 bond->dev->name, &targets[i],
2383 rt->dst.dev ? rt->dst.dev->name : "NULL");
2384
2385 ip_rt_put(rt);
2386 continue;
2387
2388 found:
2389 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2390 ip_rt_put(rt);
2391 bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
2392 addr, vlan_id);
2393 }
2394 }
2395
2396 static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2397 {
2398 int i;
2399
2400 if (!sip || !bond_has_this_ip(bond, tip)) {
2401 pr_debug("bva: sip %pI4 tip %pI4 not found\n", &sip, &tip);
2402 return;
2403 }
2404
2405 i = bond_get_targets_ip(bond->params.arp_targets, sip);
2406 if (i == -1) {
2407 pr_debug("bva: sip %pI4 not found in targets\n", &sip);
2408 return;
2409 }
2410 slave->last_arp_rx = jiffies;
2411 slave->target_last_arp_rx[i] = jiffies;
2412 }
2413
2414 int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2415 struct slave *slave)
2416 {
2417 struct arphdr *arp = (struct arphdr *)skb->data;
2418 unsigned char *arp_ptr;
2419 __be32 sip, tip;
2420 int alen;
2421
2422 if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
2423 return RX_HANDLER_ANOTHER;
2424
2425 read_lock(&bond->lock);
2426
2427 if (!slave_do_arp_validate(bond, slave))
2428 goto out_unlock;
2429
2430 alen = arp_hdr_len(bond->dev);
2431
2432 pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
2433 bond->dev->name, skb->dev->name);
2434
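/* If the ARP header is not fully within the linear skb data, copy it
 * into a temporary buffer; it is freed at out_unlock when allocated.
 */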
2435 if (alen > skb_headlen(skb)) {
2436 arp = kmalloc(alen, GFP_ATOMIC);
2437 if (!arp)
2438 goto out_unlock;
2439 if (skb_copy_bits(skb, 0, arp, alen) < 0)
2440 goto out_unlock;
2441 }
2442
2443 if (arp->ar_hln != bond->dev->addr_len ||
2444 skb->pkt_type == PACKET_OTHERHOST ||
2445 skb->pkt_type == PACKET_LOOPBACK ||
2446 arp->ar_hrd != htons(ARPHRD_ETHER) ||
2447 arp->ar_pro != htons(ETH_P_IP) ||
2448 arp->ar_pln != 4)
2449 goto out_unlock;
2450
2451 arp_ptr = (unsigned char *)(arp + 1);
2452 arp_ptr += bond->dev->addr_len;
2453 memcpy(&sip, arp_ptr, 4);
2454 arp_ptr += 4 + bond->dev->addr_len;
2455 memcpy(&tip, arp_ptr, 4);
2456
2457 pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
2458 bond->dev->name, slave->dev->name, bond_slave_state(slave),
2459 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
2460 &sip, &tip);
2461
2462 /*
2463 * Backup slaves won't see the ARP reply, but do come through
2464 * here for each ARP probe (so we swap the sip/tip to validate
2465 * the probe). In a "redundant switch, common router" type of
2466 * configuration, the ARP probe will (hopefully) travel from
2467 * the active, through one switch, the router, then the other
2468 * switch before reaching the backup.
2469 *
2470 * We 'trust' the arp requests if there is an active slave and
2471 * it received valid arp reply(s) after it became active. This
2472 * is done to avoid endless looping when we can't reach the
2473 * arp_ip_target and fool ourselves with our own arp requests.
2474 */
2475 if (bond_is_active_slave(slave))
2476 bond_validate_arp(bond, slave, sip, tip);
2477 else if (bond->curr_active_slave &&
2478 time_after(slave_last_rx(bond, bond->curr_active_slave),
2479 bond->curr_active_slave->jiffies))
2480 bond_validate_arp(bond, slave, tip, sip);
2481
2482 out_unlock:
2483 read_unlock(&bond->lock);
2484 if (arp != (struct arphdr *)skb->data)
2485 kfree(arp);
2486 return RX_HANDLER_ANOTHER;
2487 }
2488
2489 /* Verify whether we're in the arp_interval timeslice; returns true if
2490 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
2491 * arp_interval/2). The arp_interval/2 slack is needed for really fast networks.
2492 */
2493 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
2494 int mod)
2495 {
2496 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2497
2498 return time_in_range(jiffies,
2499 last_act - delta_in_ticks,
2500 last_act + mod * delta_in_ticks + delta_in_ticks/2);
2501 }
2502
2503 /*
2504 * this function is called regularly to monitor each slave's link
2505 * ensuring that traffic is being sent and received when arp monitoring
2506 * is used in load-balancing mode. if the adapter has been dormant, then an
2507 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
2508 * arp monitoring in active backup mode.
2509 */
2510 void bond_loadbalance_arp_mon(struct work_struct *work)
2511 {
2512 struct bonding *bond = container_of(work, struct bonding,
2513 arp_work.work);
2514 struct slave *slave, *oldcurrent;
2515 int do_failover = 0;
2516
2517 read_lock(&bond->lock);
2518
2519 if (list_empty(&bond->slave_list))
2520 goto re_arm;
2521
2522 oldcurrent = bond->curr_active_slave;
2523 /* see if any of the previous devices are up now (i.e. they have
2524 * xmt and rcv traffic). the curr_active_slave does not come into
2525 * the picture unless it is null. also, slave->jiffies is not needed
2526 * here because we send an arp on each slave and give a slave as
2527 * long as it needs to get the tx/rx within the delta.
2528 * TODO: what about up/down delay in arp mode? it wasn't here before
2529 * so it can wait
2530 */
2531 bond_for_each_slave(bond, slave) {
2532 unsigned long trans_start = dev_trans_start(slave->dev);
2533
2534 if (slave->link != BOND_LINK_UP) {
2535 if (bond_time_in_interval(bond, trans_start, 1) &&
2536 bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
2537
2538 slave->link = BOND_LINK_UP;
2539 bond_set_active_slave(slave);
2540
2541 /* primary_slave has no meaning in round-robin
2542 * mode. the window of a slave being up and
2543 * curr_active_slave being null after enslaving
2544 * is closed.
2545 */
2546 if (!oldcurrent) {
2547 pr_info("%s: link status definitely up for interface %s\n",
2548 bond->dev->name,
2549 slave->dev->name);
2550 do_failover = 1;
2551 } else {
2552 pr_info("%s: interface %s is now up\n",
2553 bond->dev->name,
2554 slave->dev->name);
2555 }
2556 }
2557 } else {
2558 /* slave->link == BOND_LINK_UP */
2559
2560 /* not all switches will respond to an arp request
2561 * when the source ip is 0, so don't take the link down
2562 * if we don't know our ip yet
2563 */
2564 if (!bond_time_in_interval(bond, trans_start, 2) ||
2565 !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
2566
2567 slave->link = BOND_LINK_DOWN;
2568 bond_set_backup_slave(slave);
2569
2570 if (slave->link_failure_count < UINT_MAX)
2571 slave->link_failure_count++;
2572
2573 pr_info("%s: interface %s is now down.\n",
2574 bond->dev->name,
2575 slave->dev->name);
2576
2577 if (slave == oldcurrent)
2578 do_failover = 1;
2579 }
2580 }
2581
2582 /* note: if switch is in round-robin mode, all links
2583 * must tx arp to ensure all links rx an arp - otherwise
2584 * links may oscillate or not come up at all; if switch is
2585 * in something like xor mode, there is nothing we can
2586 * do - all replies will be rx'ed on same link causing slaves
2587 * to be unstable during low/no traffic periods
2588 */
2589 if (IS_UP(slave->dev))
2590 bond_arp_send_all(bond, slave);
2591 }
2592
2593 if (do_failover) {
2594 block_netpoll_tx();
2595 write_lock_bh(&bond->curr_slave_lock);
2596
2597 bond_select_active_slave(bond);
2598
2599 write_unlock_bh(&bond->curr_slave_lock);
2600 unblock_netpoll_tx();
2601 }
2602
2603 re_arm:
2604 if (bond->params.arp_interval)
2605 queue_delayed_work(bond->wq, &bond->arp_work,
2606 msecs_to_jiffies(bond->params.arp_interval));
2607
2608 read_unlock(&bond->lock);
2609 }
2610
2611 /*
2612 * Called to inspect slaves for active-backup mode ARP monitor link state
2613 * changes. Sets new_link in slaves to specify what action should take
2614 * place for the slave. Returns 0 if no changes are found, >0 if changes
2615 * to link states must be committed.
2616 *
2617 * Called with bond->lock held for read.
2618 */
2619 static int bond_ab_arp_inspect(struct bonding *bond)
2620 {
2621 unsigned long trans_start, last_rx;
2622 struct slave *slave;
2623 int commit = 0;
2624
2625 bond_for_each_slave(bond, slave) {
2626 slave->new_link = BOND_LINK_NOCHANGE;
2627 last_rx = slave_last_rx(bond, slave);
2628
2629 if (slave->link != BOND_LINK_UP) {
2630 if (bond_time_in_interval(bond, last_rx, 1)) {
2631 slave->new_link = BOND_LINK_UP;
2632 commit++;
2633 }
2634 continue;
2635 }
2636
2637 /*
2638 * Give slaves 2*delta after being enslaved or made
2639 * active. This avoids bouncing, as the last receive
2640 * times need a full ARP monitor cycle to be updated.
2641 */
2642 if (bond_time_in_interval(bond, slave->jiffies, 2))
2643 continue;
2644
2645 /*
2646 * Backup slave is down if:
2647 * - No current_arp_slave AND
2648 * - more than 3*delta since last receive AND
2649 * - the bond has an IP address
2650 *
2651 * Note: a non-null current_arp_slave indicates
2652 * the curr_active_slave went down and we are
2653 * searching for a new one; under this condition
2654 * we only take the curr_active_slave down - this
2655 * gives each slave a chance to tx/rx traffic
2656 * before being taken out
2657 */
2658 if (!bond_is_active_slave(slave) &&
2659 !bond->current_arp_slave &&
2660 !bond_time_in_interval(bond, last_rx, 3)) {
2661 slave->new_link = BOND_LINK_DOWN;
2662 commit++;
2663 }
2664
2665 /*
2666 * Active slave is down if:
2667 * - more than 2*delta since transmitting OR
2668 * - (more than 2*delta since receive AND
2669 * the bond has an IP address)
2670 */
2671 trans_start = dev_trans_start(slave->dev);
2672 if (bond_is_active_slave(slave) &&
2673 (!bond_time_in_interval(bond, trans_start, 2) ||
2674 !bond_time_in_interval(bond, last_rx, 2))) {
2675 slave->new_link = BOND_LINK_DOWN;
2676 commit++;
2677 }
2678 }
2679
2680 return commit;
2681 }
2682
2683 /*
2684 * Called to commit link state changes noted by inspection step of
2685 * active-backup mode ARP monitor.
2686 *
2687 * Called with RTNL and bond->lock for read.
2688 */
2689 static void bond_ab_arp_commit(struct bonding *bond)
2690 {
2691 unsigned long trans_start;
2692 struct slave *slave;
2693
2694 bond_for_each_slave(bond, slave) {
2695 switch (slave->new_link) {
2696 case BOND_LINK_NOCHANGE:
2697 continue;
2698
2699 case BOND_LINK_UP:
2700 trans_start = dev_trans_start(slave->dev);
2701 if (bond->curr_active_slave != slave ||
2702 (!bond->curr_active_slave &&
2703 bond_time_in_interval(bond, trans_start, 1))) {
2704 slave->link = BOND_LINK_UP;
2705 if (bond->current_arp_slave) {
2706 bond_set_slave_inactive_flags(
2707 bond->current_arp_slave);
2708 bond->current_arp_slave = NULL;
2709 }
2710
2711 pr_info("%s: link status definitely up for interface %s.\n",
2712 bond->dev->name, slave->dev->name);
2713
2714 if (!bond->curr_active_slave ||
2715 (slave == bond->primary_slave))
2716 goto do_failover;
2717
2718 }
2719
2720 continue;
2721
2722 case BOND_LINK_DOWN:
2723 if (slave->link_failure_count < UINT_MAX)
2724 slave->link_failure_count++;
2725
2726 slave->link = BOND_LINK_DOWN;
2727 bond_set_slave_inactive_flags(slave);
2728
2729 pr_info("%s: link status definitely down for interface %s, disabling it\n",
2730 bond->dev->name, slave->dev->name);
2731
2732 if (slave == bond->curr_active_slave) {
2733 bond->current_arp_slave = NULL;
2734 goto do_failover;
2735 }
2736
2737 continue;
2738
2739 default:
2740 pr_err("%s: impossible: new_link %d on slave %s\n",
2741 bond->dev->name, slave->new_link,
2742 slave->dev->name);
2743 continue;
2744 }
2745
2746 do_failover:
2747 ASSERT_RTNL();
2748 block_netpoll_tx();
2749 write_lock_bh(&bond->curr_slave_lock);
2750 bond_select_active_slave(bond);
2751 write_unlock_bh(&bond->curr_slave_lock);
2752 unblock_netpoll_tx();
2753 }
2754
2755 bond_set_carrier(bond);
2756 }
2757
2758 /*
2759 * Send ARP probes for active-backup mode ARP monitor.
2760 *
2761 * Called with bond->lock held for read.
2762 */
2763 static void bond_ab_arp_probe(struct bonding *bond)
2764 {
2765 struct slave *slave, *next_slave;
2766 int i;
2767
2768 read_lock(&bond->curr_slave_lock);
2769
2770 if (bond->current_arp_slave && bond->curr_active_slave)
2771 pr_info("PROBE: c_arp %s && cas %s BAD\n",
2772 bond->current_arp_slave->dev->name,
2773 bond->curr_active_slave->dev->name);
2774
2775 if (bond->curr_active_slave) {
2776 bond_arp_send_all(bond, bond->curr_active_slave);
2777 read_unlock(&bond->curr_slave_lock);
2778 return;
2779 }
2780
2781 read_unlock(&bond->curr_slave_lock);
2782
2783 /* if we don't have a curr_active_slave, search for the next available
2784 * backup slave from the current_arp_slave and make it the candidate
2785 * for becoming the curr_active_slave
2786 */
2787
2788 if (!bond->current_arp_slave) {
2789 bond->current_arp_slave = bond_first_slave(bond);
2790 if (!bond->current_arp_slave)
2791 return;
2792 }
2793
2794 bond_set_slave_inactive_flags(bond->current_arp_slave);
2795
2796 /* search for next candidate */
2797 next_slave = bond_next_slave(bond, bond->current_arp_slave);
2798 bond_for_each_slave_from(bond, slave, i, next_slave) {
2799 if (IS_UP(slave->dev)) {
2800 slave->link = BOND_LINK_BACK;
2801 bond_set_slave_active_flags(slave);
2802 bond_arp_send_all(bond, slave);
2803 slave->jiffies = jiffies;
2804 bond->current_arp_slave = slave;
2805 break;
2806 }
2807
2808 /* if the link state is up at this point, we
2809 * mark it down - this can happen if we have
2810 * simultaneous link failures and
2811 * reselect_active_interface doesn't make this
2812 * one the current slave, so it is still marked
2813 * up when it is actually down.
2814 */
2815 if (slave->link == BOND_LINK_UP) {
2816 slave->link = BOND_LINK_DOWN;
2817 if (slave->link_failure_count < UINT_MAX)
2818 slave->link_failure_count++;
2819
2820 bond_set_slave_inactive_flags(slave);
2821
2822 pr_info("%s: backup interface %s is now down.\n",
2823 bond->dev->name, slave->dev->name);
2824 }
2825 }
2826 }
2827
2828 void bond_activebackup_arp_mon(struct work_struct *work)
2829 {
2830 struct bonding *bond = container_of(work, struct bonding,
2831 arp_work.work);
2832 bool should_notify_peers = false;
2833 int delta_in_ticks;
2834
2835 read_lock(&bond->lock);
2836
2837 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2838
2839 if (list_empty(&bond->slave_list))
2840 goto re_arm;
2841
2842 should_notify_peers = bond_should_notify_peers(bond);
2843
2844 if (bond_ab_arp_inspect(bond)) {
2845 read_unlock(&bond->lock);
2846
2847 /* Race avoidance with bond_close flush of workqueue */
2848 if (!rtnl_trylock()) {
2849 read_lock(&bond->lock);
2850 delta_in_ticks = 1;
2851 should_notify_peers = false;
2852 goto re_arm;
2853 }
2854
2855 read_lock(&bond->lock);
2856
2857 bond_ab_arp_commit(bond);
2858
2859 read_unlock(&bond->lock);
2860 rtnl_unlock();
2861 read_lock(&bond->lock);
2862 }
2863
2864 bond_ab_arp_probe(bond);
2865
2866 re_arm:
2867 if (bond->params.arp_interval)
2868 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
2869
2870 read_unlock(&bond->lock);
2871
2872 if (should_notify_peers) {
2873 if (!rtnl_trylock())
2874 return;
2875 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2876 rtnl_unlock();
2877 }
2878 }
2879
2880 /*-------------------------- netdev event handling --------------------------*/
2881
2882 /*
2883 * Change device name
2884 */
2885 static int bond_event_changename(struct bonding *bond)
2886 {
2887 bond_remove_proc_entry(bond);
2888 bond_create_proc_entry(bond);
2889
2890 bond_debug_reregister(bond);
2891
2892 return NOTIFY_DONE;
2893 }
2894
2895 static int bond_master_netdev_event(unsigned long event,
2896 struct net_device *bond_dev)
2897 {
2898 struct bonding *event_bond = netdev_priv(bond_dev);
2899
2900 switch (event) {
2901 case NETDEV_CHANGENAME:
2902 return bond_event_changename(event_bond);
2903 case NETDEV_UNREGISTER:
2904 bond_remove_proc_entry(event_bond);
2905 break;
2906 case NETDEV_REGISTER:
2907 bond_create_proc_entry(event_bond);
2908 break;
2909 case NETDEV_NOTIFY_PEERS:
2910 if (event_bond->send_peer_notif)
2911 event_bond->send_peer_notif--;
2912 break;
2913 default:
2914 break;
2915 }
2916
2917 return NOTIFY_DONE;
2918 }
2919
2920 static int bond_slave_netdev_event(unsigned long event,
2921 struct net_device *slave_dev)
2922 {
2923 struct slave *slave = bond_slave_get_rtnl(slave_dev);
2924 struct bonding *bond;
2925 struct net_device *bond_dev;
2926 u32 old_speed;
2927 u8 old_duplex;
2928
2929 /* A netdev event can be generated while enslaving a device
2930 * before netdev_rx_handler_register is called in which case
2931 * slave will be NULL
2932 */
2933 if (!slave)
2934 return NOTIFY_DONE;
2935 bond_dev = slave->bond->dev;
2936 bond = slave->bond;
2937
2938 switch (event) {
2939 case NETDEV_UNREGISTER:
2940 if (bond_dev->type != ARPHRD_ETHER)
2941 bond_release_and_destroy(bond_dev, slave_dev);
2942 else
2943 bond_release(bond_dev, slave_dev);
2944 break;
2945 case NETDEV_UP:
2946 case NETDEV_CHANGE:
2947 old_speed = slave->speed;
2948 old_duplex = slave->duplex;
2949
2950 bond_update_speed_duplex(slave);
2951
2952 if (bond->params.mode == BOND_MODE_8023AD) {
2953 if (old_speed != slave->speed)
2954 bond_3ad_adapter_speed_changed(slave);
2955 if (old_duplex != slave->duplex)
2956 bond_3ad_adapter_duplex_changed(slave);
2957 }
2958 break;
2959 case NETDEV_DOWN:
2960 /*
2961 * ... Or is it this?
2962 */
2963 break;
2964 case NETDEV_CHANGEMTU:
2965 /*
2966 * TODO: Should slaves be allowed to
2967 * independently alter their MTU? For
2968 * an active-backup bond, slaves need
2969 * not be the same type of device, so
2970 * MTUs may vary. For other modes,
2971 * slaves arguably should have the
2972 * same MTUs. To do this, we'd need to
2973 * take over the slave's change_mtu
2974 * function for the duration of their
2975 * servitude.
2976 */
2977 break;
2978 case NETDEV_CHANGENAME:
2979 /*
2980 * TODO: handle changing the primary's name
2981 */
2982 break;
2983 case NETDEV_FEAT_CHANGE:
2984 bond_compute_features(bond);
2985 break;
2986 case NETDEV_RESEND_IGMP:
2987 /* Propagate to master device */
2988 call_netdevice_notifiers(event, slave->bond->dev);
2989 break;
2990 default:
2991 break;
2992 }
2993
2994 return NOTIFY_DONE;
2995 }
2996
2997 /*
2998 * bond_netdev_event: handle netdev notifier chain events.
2999 *
3000 * This function receives events for the netdev chain. The caller (an
3001 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3002 * locks for us to safely manipulate the slave devices (RTNL lock,
3003 * dev_probe_lock).
3004 */
3005 static int bond_netdev_event(struct notifier_block *this,
3006 unsigned long event, void *ptr)
3007 {
3008 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3009
3010 pr_debug("event_dev: %s, event: %lx\n",
3011 event_dev ? event_dev->name : "None",
3012 event);
3013
3014 if (!(event_dev->priv_flags & IFF_BONDING))
3015 return NOTIFY_DONE;
3016
3017 if (event_dev->flags & IFF_MASTER) {
3018 pr_debug("IFF_MASTER\n");
3019 return bond_master_netdev_event(event, event_dev);
3020 }
3021
3022 if (event_dev->flags & IFF_SLAVE) {
3023 pr_debug("IFF_SLAVE\n");
3024 return bond_slave_netdev_event(event, event_dev);
3025 }
3026
3027 return NOTIFY_DONE;
3028 }
3029
3030 static struct notifier_block bond_netdev_notifier = {
3031 .notifier_call = bond_netdev_event,
3032 };
3033
3034 /*---------------------------- Hashing Policies -----------------------------*/
3035
3036 /*
3037 * Hash for the output device based upon layer 2 data
3038 */
3039 static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3040 {
3041 struct ethhdr *data = (struct ethhdr *)skb->data;
3042
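/* XOR of the low byte of the source and destination MAC, modulo the slave count */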
3043 if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
3044 return (data->h_dest[5] ^ data->h_source[5]) % count;
3045
3046 return 0;
3047 }
3048
3049 /*
3050 * Hash for the output device based upon layer 2 and layer 3 data. If
3051 * the packet is not IP, fall back on bond_xmit_hash_policy_l2()
3052 */
3053 static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
3054 {
3055 const struct ethhdr *data;
3056 const struct iphdr *iph;
3057 const struct ipv6hdr *ipv6h;
3058 u32 v6hash;
3059 const __be32 *s, *d;
3060
3061 if (skb->protocol == htons(ETH_P_IP) &&
3062 pskb_network_may_pull(skb, sizeof(*iph))) {
3063 iph = ip_hdr(skb);
3064 data = (struct ethhdr *)skb->data;
3065 return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
3066 (data->h_dest[5] ^ data->h_source[5])) % count;
3067 } else if (skb->protocol == htons(ETH_P_IPV6) &&
3068 pskb_network_may_pull(skb, sizeof(*ipv6h))) {
3069 ipv6h = ipv6_hdr(skb);
3070 data = (struct ethhdr *)skb->data;
3071 s = &ipv6h->saddr.s6_addr32[0];
3072 d = &ipv6h->daddr.s6_addr32[0];
3073 v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
3074 v6hash ^= (v6hash >> 24) ^ (v6hash >> 16) ^ (v6hash >> 8);
3075 return (v6hash ^ data->h_dest[5] ^ data->h_source[5]) % count;
3076 }
3077
3078 return bond_xmit_hash_policy_l2(skb, count);
3079 }
3080
3081 /*
3082 * Hash for the output device based upon layer 3 and layer 4 data. If
3083 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
3084 * altogether not IP, fall back on bond_xmit_hash_policy_l2()
3085 */
3086 static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
3087 {
3088 u32 layer4_xor = 0;
3089 const struct iphdr *iph;
3090 const struct ipv6hdr *ipv6h;
3091 const __be32 *s, *d;
3092 const __be16 *l4 = NULL;
3093 __be16 _l4[2];
3094 int noff = skb_network_offset(skb);
3095 int poff;
3096
3097 if (skb->protocol == htons(ETH_P_IP) &&
3098 pskb_may_pull(skb, noff + sizeof(*iph))) {
3099 iph = ip_hdr(skb);
3100 poff = proto_ports_offset(iph->protocol);
3101
3102 if (!ip_is_fragment(iph) && poff >= 0) {
3103 l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
3104 sizeof(_l4), &_l4);
3105 if (l4)
3106 layer4_xor = ntohs(l4[0] ^ l4[1]);
3107 }
3108 return (layer4_xor ^
3109 ((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
3110 } else if (skb->protocol == htons(ETH_P_IPV6) &&
3111 pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
3112 ipv6h = ipv6_hdr(skb);
3113 poff = proto_ports_offset(ipv6h->nexthdr);
3114 if (poff >= 0) {
3115 l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
3116 sizeof(_l4), &_l4);
3117 if (l4)
3118 layer4_xor = ntohs(l4[0] ^ l4[1]);
3119 }
3120 s = &ipv6h->saddr.s6_addr32[0];
3121 d = &ipv6h->daddr.s6_addr32[0];
3122 layer4_xor ^= (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
3123 layer4_xor ^= (layer4_xor >> 24) ^ (layer4_xor >> 16) ^
3124 (layer4_xor >> 8);
3125 return layer4_xor % count;
3126 }
3127
3128 return bond_xmit_hash_policy_l2(skb, count);
3129 }
3130
3131 /*-------------------------- Device entry points ----------------------------*/
3132
3133 static void bond_work_init_all(struct bonding *bond)
3134 {
3135 INIT_DELAYED_WORK(&bond->mcast_work,
3136 bond_resend_igmp_join_requests_delayed);
3137 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
3138 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3139 if (bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3140 INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon);
3141 else
3142 INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon);
3143 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3144 }
3145
3146 static void bond_work_cancel_all(struct bonding *bond)
3147 {
3148 cancel_delayed_work_sync(&bond->mii_work);
3149 cancel_delayed_work_sync(&bond->arp_work);
3150 cancel_delayed_work_sync(&bond->alb_work);
3151 cancel_delayed_work_sync(&bond->ad_work);
3152 cancel_delayed_work_sync(&bond->mcast_work);
3153 }
3154
3155 static int bond_open(struct net_device *bond_dev)
3156 {
3157 struct bonding *bond = netdev_priv(bond_dev);
3158 struct slave *slave;
3159
3160 /* reset slave->backup and slave->inactive */
3161 read_lock(&bond->lock);
3162 if (!list_empty(&bond->slave_list)) {
3163 read_lock(&bond->curr_slave_lock);
3164 bond_for_each_slave(bond, slave) {
3165 if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
3166 && (slave != bond->curr_active_slave)) {
3167 bond_set_slave_inactive_flags(slave);
3168 } else {
3169 bond_set_slave_active_flags(slave);
3170 }
3171 }
3172 read_unlock(&bond->curr_slave_lock);
3173 }
3174 read_unlock(&bond->lock);
3175
3176 bond_work_init_all(bond);
3177
3178 if (bond_is_lb(bond)) {
3179 /* bond_alb_initialize must be called before the timer
3180 * is started.
3181 */
3182 if (bond_alb_initialize(bond, (bond->params.mode == BOND_MODE_ALB)))
3183 return -ENOMEM;
3184 queue_delayed_work(bond->wq, &bond->alb_work, 0);
3185 }
3186
3187 if (bond->params.miimon) /* link check interval, in milliseconds. */
3188 queue_delayed_work(bond->wq, &bond->mii_work, 0);
3189
3190 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
3191 queue_delayed_work(bond->wq, &bond->arp_work, 0);
3192 if (bond->params.arp_validate)
3193 bond->recv_probe = bond_arp_rcv;
3194 }
3195
3196 if (bond->params.mode == BOND_MODE_8023AD) {
3197 queue_delayed_work(bond->wq, &bond->ad_work, 0);
3198 /* register to receive LACPDUs */
3199 bond->recv_probe = bond_3ad_lacpdu_recv;
3200 bond_3ad_initiate_agg_selection(bond, 1);
3201 }
3202
3203 return 0;
3204 }
3205
3206 static int bond_close(struct net_device *bond_dev)
3207 {
3208 struct bonding *bond = netdev_priv(bond_dev);
3209
3210 bond_work_cancel_all(bond);
3211 bond->send_peer_notif = 0;
3212 if (bond_is_lb(bond))
3213 bond_alb_deinitialize(bond);
3214 bond->recv_probe = NULL;
3215
3216 return 0;
3217 }
3218
3219 static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
3220 struct rtnl_link_stats64 *stats)
3221 {
3222 struct bonding *bond = netdev_priv(bond_dev);
3223 struct rtnl_link_stats64 temp;
3224 struct slave *slave;
3225
3226 memset(stats, 0, sizeof(*stats));
3227
3228 read_lock_bh(&bond->lock);
3229 bond_for_each_slave(bond, slave) {
3230 const struct rtnl_link_stats64 *sstats =
3231 dev_get_stats(slave->dev, &temp);
3232
3233 stats->rx_packets += sstats->rx_packets;
3234 stats->rx_bytes += sstats->rx_bytes;
3235 stats->rx_errors += sstats->rx_errors;
3236 stats->rx_dropped += sstats->rx_dropped;
3237
3238 stats->tx_packets += sstats->tx_packets;
3239 stats->tx_bytes += sstats->tx_bytes;
3240 stats->tx_errors += sstats->tx_errors;
3241 stats->tx_dropped += sstats->tx_dropped;
3242
3243 stats->multicast += sstats->multicast;
3244 stats->collisions += sstats->collisions;
3245
3246 stats->rx_length_errors += sstats->rx_length_errors;
3247 stats->rx_over_errors += sstats->rx_over_errors;
3248 stats->rx_crc_errors += sstats->rx_crc_errors;
3249 stats->rx_frame_errors += sstats->rx_frame_errors;
3250 stats->rx_fifo_errors += sstats->rx_fifo_errors;
3251 stats->rx_missed_errors += sstats->rx_missed_errors;
3252
3253 stats->tx_aborted_errors += sstats->tx_aborted_errors;
3254 stats->tx_carrier_errors += sstats->tx_carrier_errors;
3255 stats->tx_fifo_errors += sstats->tx_fifo_errors;
3256 stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
3257 stats->tx_window_errors += sstats->tx_window_errors;
3258 }
3259 read_unlock_bh(&bond->lock);
3260
3261 return stats;
3262 }
3263
3264 static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
3265 {
3266 struct net_device *slave_dev = NULL;
3267 struct ifbond k_binfo;
3268 struct ifbond __user *u_binfo = NULL;
3269 struct ifslave k_sinfo;
3270 struct ifslave __user *u_sinfo = NULL;
3271 struct mii_ioctl_data *mii = NULL;
3272 struct net *net;
3273 int res = 0;
3274
3275 pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd);
3276
3277 switch (cmd) {
3278 case SIOCGMIIPHY:
3279 mii = if_mii(ifr);
3280 if (!mii)
3281 return -EINVAL;
3282
3283 mii->phy_id = 0;
3284 /* Fall Through */
3285 case SIOCGMIIREG:
3286 /*
3287 * We do this again just in case we were called by SIOCGMIIREG
3288 * instead of SIOCGMIIPHY.
3289 */
3290 mii = if_mii(ifr);
3291 if (!mii)
3292 return -EINVAL;
3293
3294
3295 if (mii->reg_num == 1) {
3296 struct bonding *bond = netdev_priv(bond_dev);
3297 mii->val_out = 0;
3298 read_lock(&bond->lock);
3299 read_lock(&bond->curr_slave_lock);
3300 if (netif_carrier_ok(bond->dev))
3301 mii->val_out = BMSR_LSTATUS;
3302
3303 read_unlock(&bond->curr_slave_lock);
3304 read_unlock(&bond->lock);
3305 }
3306
3307 return 0;
3308 case BOND_INFO_QUERY_OLD:
3309 case SIOCBONDINFOQUERY:
3310 u_binfo = (struct ifbond __user *)ifr->ifr_data;
3311
3312 if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
3313 return -EFAULT;
3314
3315 res = bond_info_query(bond_dev, &k_binfo);
3316 if (res == 0 &&
3317 copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
3318 return -EFAULT;
3319
3320 return res;
3321 case BOND_SLAVE_INFO_QUERY_OLD:
3322 case SIOCBONDSLAVEINFOQUERY:
3323 u_sinfo = (struct ifslave __user *)ifr->ifr_data;
3324
3325 if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
3326 return -EFAULT;
3327
3328 res = bond_slave_info_query(bond_dev, &k_sinfo);
3329 if (res == 0 &&
3330 copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
3331 return -EFAULT;
3332
3333 return res;
3334 default:
3335 /* Go on */
3336 break;
3337 }
3338
3339 net = dev_net(bond_dev);
3340
3341 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3342 return -EPERM;
3343
3344 slave_dev = dev_get_by_name(net, ifr->ifr_slave);
3345
3346 pr_debug("slave_dev=%p:\n", slave_dev);
3347
3348 if (!slave_dev)
3349 res = -ENODEV;
3350 else {
3351 pr_debug("slave_dev->name=%s:\n", slave_dev->name);
3352 switch (cmd) {
3353 case BOND_ENSLAVE_OLD:
3354 case SIOCBONDENSLAVE:
3355 res = bond_enslave(bond_dev, slave_dev);
3356 break;
3357 case BOND_RELEASE_OLD:
3358 case SIOCBONDRELEASE:
3359 res = bond_release(bond_dev, slave_dev);
3360 break;
3361 case BOND_SETHWADDR_OLD:
3362 case SIOCBONDSETHWADDR:
3363 bond_set_dev_addr(bond_dev, slave_dev);
3364 res = 0;
3365 break;
3366 case BOND_CHANGE_ACTIVE_OLD:
3367 case SIOCBONDCHANGEACTIVE:
3368 res = bond_ioctl_change_active(bond_dev, slave_dev);
3369 break;
3370 default:
3371 res = -EOPNOTSUPP;
3372 }
3373
3374 dev_put(slave_dev);
3375 }
3376
3377 return res;
3378 }
3379
3380 static void bond_change_rx_flags(struct net_device *bond_dev, int change)
3381 {
3382 struct bonding *bond = netdev_priv(bond_dev);
3383
3384 if (change & IFF_PROMISC)
3385 bond_set_promiscuity(bond,
3386 bond_dev->flags & IFF_PROMISC ? 1 : -1);
3387
3388 if (change & IFF_ALLMULTI)
3389 bond_set_allmulti(bond,
3390 bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
3391 }
3392
3393 static void bond_set_rx_mode(struct net_device *bond_dev)
3394 {
3395 struct bonding *bond = netdev_priv(bond_dev);
3396 struct slave *slave;
3397
3398 ASSERT_RTNL();
3399
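/* In modes that use a primary slave only the current active slave needs
 * the bond's address lists; in all other modes every slave does.
 */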
3400 if (USES_PRIMARY(bond->params.mode)) {
3401 slave = rtnl_dereference(bond->curr_active_slave);
3402 if (slave) {
3403 dev_uc_sync(slave->dev, bond_dev);
3404 dev_mc_sync(slave->dev, bond_dev);
3405 }
3406 } else {
3407 bond_for_each_slave(bond, slave) {
3408 dev_uc_sync_multiple(slave->dev, bond_dev);
3409 dev_mc_sync_multiple(slave->dev, bond_dev);
3410 }
3411 }
3412 }
3413
3414 static int bond_neigh_init(struct neighbour *n)
3415 {
3416 struct bonding *bond = netdev_priv(n->dev);
3417 const struct net_device_ops *slave_ops;
3418 struct neigh_parms parms;
3419 struct slave *slave;
3420 int ret;
3421
3422 slave = bond_first_slave(bond);
3423 if (!slave)
3424 return 0;
3425 slave_ops = slave->dev->netdev_ops;
3426 if (!slave_ops->ndo_neigh_setup)
3427 return 0;
3428
3429 parms.neigh_setup = NULL;
3430 parms.neigh_cleanup = NULL;
3431 ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
3432 if (ret)
3433 return ret;
3434
3435 /*
3436 * Assign slave's neigh_cleanup to neighbour in case cleanup is called
3437 * after the last slave has been detached. Assumes that all slaves
3438 * utilize the same neigh_cleanup (true at this writing as only user
3439 * is ipoib).
3440 */
3441 n->parms->neigh_cleanup = parms.neigh_cleanup;
3442
3443 if (!parms.neigh_setup)
3444 return 0;
3445
3446 return parms.neigh_setup(n);
3447 }
3448
3449 /*
3450 * The bonding ndo_neigh_setup is called at init time before any
3451 * slave exists, so we must declare a proxy setup function which will
3452 * be used at run time to resolve the actual slave neigh param setup.
3453 *
3454 * It's also called by master devices (such as vlans) to setup their
3455 * underlying devices. In that case - do nothing, we're already set up from
3456 * our init.
3457 */
3458 static int bond_neigh_setup(struct net_device *dev,
3459 struct neigh_parms *parms)
3460 {
3461 /* modify only our neigh_parms */
3462 if (parms->dev == dev)
3463 parms->neigh_setup = bond_neigh_init;
3464
3465 return 0;
3466 }
3467
3468 /*
3469 * Change the MTU of all of a master's slaves to match the master
3470 */
3471 static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
3472 {
3473 struct bonding *bond = netdev_priv(bond_dev);
3474 struct slave *slave;
3475 int res = 0;
3476
3477 pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
3478 (bond_dev ? bond_dev->name : "None"), new_mtu);
3479
3480 /* Can't hold bond->lock with bh disabled here since
3481 * some base drivers panic. On the other hand we can't
3482 * hold bond->lock without bh disabled because we'll
3483 * deadlock. The only solution is to rely on the fact
3484 * that we're under rtnl_lock here, and the slaves
3485 * list won't change. This doesn't solve the problem
3486 * of setting the slave's MTU while it is
3487 * transmitting, but the assumption is that the base
3488 * driver can handle that.
3489 *
3490 * TODO: figure out a way to safely iterate the slaves
3491 * list, but without holding a lock around the actual
3492 * call to the base driver.
3493 */
3494
3495 bond_for_each_slave(bond, slave) {
3496 pr_debug("s %p s->p %p c_m %p\n",
3497 slave,
3498 bond_prev_slave(bond, slave),
3499 slave->dev->netdev_ops->ndo_change_mtu);
3500
3501 res = dev_set_mtu(slave->dev, new_mtu);
3502
3503 if (res) {
3504 /* If we failed to set the slave's mtu to the new value
3505 * we must abort the operation even in ACTIVE_BACKUP
3506 * mode, because if we allow the backup slaves to have
3507 * different mtu values than the active slave we'll
3508 * need to change their mtu when doing a failover. That
3509 * means changing their mtu from timer context, which
3510 * is probably not a good idea.
3511 */
3512 pr_debug("err %d %s\n", res, slave->dev->name);
3513 goto unwind;
3514 }
3515 }
3516
3517 bond_dev->mtu = new_mtu;
3518
3519 return 0;
3520
3521 unwind:
3522 /* unwind from head to the slave that failed */
3523 bond_for_each_slave_continue_reverse(bond, slave) {
3524 int tmp_res;
3525
3526 tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
3527 if (tmp_res) {
3528 pr_debug("unwind err %d dev %s\n",
3529 tmp_res, slave->dev->name);
3530 }
3531 }
3532
3533 return res;
3534 }
3535
3536 /*
3537 * Change HW address
3538 *
3539 * Note that many devices must be down to change the HW address, and
3540 * downing the master releases all slaves. We can make bonds full of
3541 * bonding devices to test this, however.
3542 */
3543 static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
3544 {
3545 struct bonding *bond = netdev_priv(bond_dev);
3546 struct sockaddr *sa = addr, tmp_sa;
3547 struct slave *slave;
3548 int res = 0;
3549
3550 if (bond->params.mode == BOND_MODE_ALB)
3551 return bond_alb_set_mac_address(bond_dev, addr);
3552
3553
3554 pr_debug("bond=%p, name=%s\n",
3555 bond, bond_dev ? bond_dev->name : "None");
3556
3557 /* If fail_over_mac is enabled, do nothing and return success.
3558 * Returning an error causes ifenslave to fail.
3559 */
3560 if (bond->params.fail_over_mac)
3561 return 0;
3562
3563 if (!is_valid_ether_addr(sa->sa_data))
3564 return -EADDRNOTAVAIL;
3565
3566 /* Can't hold bond->lock with bh disabled here since
3567 * some base drivers panic. On the other hand we can't
3568 * hold bond->lock without bh disabled because we'll
3569 * deadlock. The only solution is to rely on the fact
3570 * that we're under rtnl_lock here, and the slaves
3571 * list won't change. This doesn't solve the problem
3572 * of setting the slave's hw address while it is
3573 * transmitting, but the assumption is that the base
3574 * driver can handle that.
3575 *
3576 * TODO: figure out a way to safely iterate the slaves
3577 * list, but without holding a lock around the actual
3578 * call to the base driver.
3579 */
3580
3581 bond_for_each_slave(bond, slave) {
3582 const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
3583 pr_debug("slave %p %s\n", slave, slave->dev->name);
3584
3585 if (slave_ops->ndo_set_mac_address == NULL) {
3586 res = -EOPNOTSUPP;
3587 pr_debug("EOPNOTSUPP %s\n", slave->dev->name);
3588 goto unwind;
3589 }
3590
3591 res = dev_set_mac_address(slave->dev, addr);
3592 if (res) {
3593 /* TODO: consider downing the slave
3594 * and retry ?
3595 * User should expect communications
3596 * breakage anyway until ARP finish
3597 * updating, so...
3598 */
3599 pr_debug("err %d %s\n", res, slave->dev->name);
3600 goto unwind;
3601 }
3602 }
3603
3604 /* success */
3605 memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
3606 return 0;
3607
3608 unwind:
3609 memcpy(tmp_sa.sa_data, bond_dev->dev_addr, bond_dev->addr_len);
3610 tmp_sa.sa_family = bond_dev->type;
3611
3612 /* unwind from head to the slave that failed */
3613 bond_for_each_slave_continue_reverse(bond, slave) {
3614 int tmp_res;
3615
3616 tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
3617 if (tmp_res) {
3618 pr_debug("unwind err %d dev %s\n",
3619 tmp_res, slave->dev->name);
3620 }
3621 }
3622
3623 return res;
3624 }
3625
3626 /**
3627 * bond_xmit_slave_id - transmit skb through slave with slave_id
3628 * @bond: bonding device that is transmitting
3629 * @skb: buffer to transmit
3630 * @slave_id: slave id up to slave_cnt-1 through which to transmit
3631 *
3632 * This function tries to transmit through slave with slave_id but in case
3633 * it fails, it tries to find the first available slave for transmission.
3634 * The skb is consumed in all cases, thus the function is void.
3635 */
3636 void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
3637 {
3638 struct slave *slave;
3639 int i = slave_id;
3640
3641 /* Here we start from the slave with slave_id */
3642 bond_for_each_slave_rcu(bond, slave) {
3643 if (--i < 0) {
3644 if (slave_can_tx(slave)) {
3645 bond_dev_queue_xmit(bond, skb, slave->dev);
3646 return;
3647 }
3648 }
3649 }
3650
3651 /* Here we start from the first slave up to slave_id */
3652 i = slave_id;
3653 bond_for_each_slave_rcu(bond, slave) {
3654 if (--i < 0)
3655 break;
3656 if (slave_can_tx(slave)) {
3657 bond_dev_queue_xmit(bond, skb, slave->dev);
3658 return;
3659 }
3660 }
3661 /* no slave that can tx has been found */
3662 kfree_skb(skb);
3663 }
3664
3665 static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
3666 {
3667 struct bonding *bond = netdev_priv(bond_dev);
3668 struct iphdr *iph = ip_hdr(skb);
3669 struct slave *slave;
3670
3671 /*
3672 * Start with the curr_active_slave that joined the bond as the
3673 * default for sending IGMP traffic. For failover purposes one
3674 * needs to maintain some consistency for the interface that will
3675 * send the join/membership reports. The curr_active_slave found
3676 * will send all of this type of traffic.
3677 */
3678 if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
3679 slave = rcu_dereference(bond->curr_active_slave);
3680 if (slave && slave_can_tx(slave))
3681 bond_dev_queue_xmit(bond, skb, slave->dev);
3682 else
3683 bond_xmit_slave_id(bond, skb, 0);
3684 } else {
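/* all other traffic is spread across the slaves round-robin
 * using the per-bond tx counter
 */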
3685 bond_xmit_slave_id(bond, skb,
3686 bond->rr_tx_counter++ % bond->slave_cnt);
3687 }
3688
3689 return NETDEV_TX_OK;
3690 }
3691
3692 /*
3693 * in active-backup mode, we know that bond->curr_active_slave is always valid if
3694 * the bond has a usable interface.
3695 */
3696 static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
3697 {
3698 struct bonding *bond = netdev_priv(bond_dev);
3699 struct slave *slave;
3700
3701 slave = rcu_dereference(bond->curr_active_slave);
3702 if (slave)
3703 bond_dev_queue_xmit(bond, skb, slave->dev);
3704 else
3705 kfree_skb(skb);
3706
3707 return NETDEV_TX_OK;
3708 }
3709
3710 /*
3711  * In bond_xmit_xor(), we determine the output device by using a pre-
3712  * determined xmit_hash_policy(). If the selected device is not enabled,
3713 * find the next active slave.
3714 */
3715 static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
3716 {
3717 struct bonding *bond = netdev_priv(bond_dev);
3718
3719 bond_xmit_slave_id(bond, skb,
3720 bond->xmit_hash_policy(skb, bond->slave_cnt));
3721
3722 return NETDEV_TX_OK;
3723 }
3724
3725 /* in broadcast mode, we send everything to all usable interfaces. */
3726 static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
3727 {
3728 struct bonding *bond = netdev_priv(bond_dev);
3729 struct slave *slave = NULL;
3730
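	/* Clone the skb once per usable slave except the last one; the
	 * original skb is transmitted on the last usable slave (or freed
	 * if none is up), which saves one copy.
	 */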
3731 bond_for_each_slave_rcu(bond, slave) {
3732 if (bond_is_last_slave(bond, slave))
3733 break;
3734 if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
3735 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
3736
3737 if (!skb2) {
3738 pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
3739 bond_dev->name);
3740 continue;
3741 }
3742 /* bond_dev_queue_xmit always returns 0 */
3743 bond_dev_queue_xmit(bond, skb2, slave->dev);
3744 }
3745 }
3746 if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
3747 bond_dev_queue_xmit(bond, skb, slave->dev);
3748 else
3749 kfree_skb(skb);
3750
3751 return NETDEV_TX_OK;
3752 }
3753
3754 /*------------------------- Device initialization ---------------------------*/
3755
3756 static void bond_set_xmit_hash_policy(struct bonding *bond)
3757 {
3758 switch (bond->params.xmit_policy) {
3759 case BOND_XMIT_POLICY_LAYER23:
3760 bond->xmit_hash_policy = bond_xmit_hash_policy_l23;
3761 break;
3762 case BOND_XMIT_POLICY_LAYER34:
3763 bond->xmit_hash_policy = bond_xmit_hash_policy_l34;
3764 break;
3765 case BOND_XMIT_POLICY_LAYER2:
3766 default:
3767 bond->xmit_hash_policy = bond_xmit_hash_policy_l2;
3768 break;
3769 }
3770 }
3771
3772 /*
3773 * Lookup the slave that corresponds to a qid
3774 */
3775 static inline int bond_slave_override(struct bonding *bond,
3776 struct sk_buff *skb)
3777 {
3778 struct slave *slave = NULL;
3779 struct slave *check_slave;
3780 int res = 1;
3781
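	/* A queue_mapping of zero means no specific slave queue id was
	 * requested (override queue ids on slaves are non-zero), so fall
	 * back to the default transmit policy.
	 */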
3782 if (!skb->queue_mapping)
3783 return 1;
3784
3785 /* Find out if any slaves have the same mapping as this skb. */
3786 bond_for_each_slave_rcu(bond, check_slave) {
3787 if (check_slave->queue_id == skb->queue_mapping) {
3788 slave = check_slave;
3789 break;
3790 }
3791 }
3792
3793 	/* Transmit on the matching slave only if it is up; otherwise the
3794 	 * caller falls back to the default transmit policy.
3795 	 */
3794 if (slave && slave->queue_id && IS_UP(slave->dev) &&
3795 (slave->link == BOND_LINK_UP)) {
3796 res = bond_dev_queue_xmit(bond, skb, slave->dev);
3797 }
3798
3799 return res;
3800 }
3801
3802
3803 static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
3804 {
3805 /*
3806 * This helper function exists to help dev_pick_tx get the correct
3807 * destination queue. Using a helper function skips a call to
3808 * skb_tx_hash and will put the skbs in the queue we expect on their
3809 * way down to the bonding driver.
3810 */
3811 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
3812
3813 /*
3814 * Save the original txq to restore before passing to the driver
3815 */
3816 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
3817
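	/* The recorded rx queue may exceed our tx queue count; wrap it into
	 * range by repeated subtraction, i.e. txq % real_num_tx_queues
	 * without an explicit division.
	 */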
3818 if (unlikely(txq >= dev->real_num_tx_queues)) {
3819 do {
3820 txq -= dev->real_num_tx_queues;
3821 } while (txq >= dev->real_num_tx_queues);
3822 }
3823 return txq;
3824 }
3825
3826 static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
3827 {
3828 struct bonding *bond = netdev_priv(dev);
3829
3830 if (TX_QUEUE_OVERRIDE(bond->params.mode)) {
3831 if (!bond_slave_override(bond, skb))
3832 return NETDEV_TX_OK;
3833 }
3834
3835 switch (bond->params.mode) {
3836 case BOND_MODE_ROUNDROBIN:
3837 return bond_xmit_roundrobin(skb, dev);
3838 case BOND_MODE_ACTIVEBACKUP:
3839 return bond_xmit_activebackup(skb, dev);
3840 case BOND_MODE_XOR:
3841 return bond_xmit_xor(skb, dev);
3842 case BOND_MODE_BROADCAST:
3843 return bond_xmit_broadcast(skb, dev);
3844 case BOND_MODE_8023AD:
3845 return bond_3ad_xmit_xor(skb, dev);
3846 case BOND_MODE_ALB:
3847 case BOND_MODE_TLB:
3848 return bond_alb_xmit(skb, dev);
3849 default:
3850 /* Should never happen, mode already checked */
3851 pr_err("%s: Error: Unknown bonding mode %d\n",
3852 dev->name, bond->params.mode);
3853 WARN_ON_ONCE(1);
3854 kfree_skb(skb);
3855 return NETDEV_TX_OK;
3856 }
3857 }
3858
3859 static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
3860 {
3861 struct bonding *bond = netdev_priv(dev);
3862 netdev_tx_t ret = NETDEV_TX_OK;
3863
3864 /*
3865 * If we risk deadlock from transmitting this in the
3866 * netpoll path, tell netpoll to queue the frame for later tx
3867 */
3868 if (is_netpoll_tx_blocked(dev))
3869 return NETDEV_TX_BUSY;
3870
3871 rcu_read_lock();
3872 if (!list_empty(&bond->slave_list))
3873 ret = __bond_start_xmit(skb, dev);
3874 else
3875 kfree_skb(skb);
3876 rcu_read_unlock();
3877
3878 return ret;
3879 }
3880
3881 /*
3882 * set bond mode specific net device operations
3883 */
3884 void bond_set_mode_ops(struct bonding *bond, int mode)
3885 {
3886 struct net_device *bond_dev = bond->dev;
3887
3888 switch (mode) {
3889 case BOND_MODE_ROUNDROBIN:
3890 break;
3891 case BOND_MODE_ACTIVEBACKUP:
3892 break;
3893 case BOND_MODE_XOR:
3894 bond_set_xmit_hash_policy(bond);
3895 break;
3896 case BOND_MODE_BROADCAST:
3897 break;
3898 case BOND_MODE_8023AD:
3899 bond_set_xmit_hash_policy(bond);
3900 break;
3901 case BOND_MODE_ALB:
3902 /* FALLTHRU */
3903 case BOND_MODE_TLB:
3904 break;
3905 default:
3906 /* Should never happen, mode already checked */
3907 pr_err("%s: Error: Unknown bonding mode %d\n",
3908 bond_dev->name, mode);
3909 break;
3910 }
3911 }
3912
3913 static int bond_ethtool_get_settings(struct net_device *bond_dev,
3914 struct ethtool_cmd *ecmd)
3915 {
3916 struct bonding *bond = netdev_priv(bond_dev);
3917 unsigned long speed = 0;
3918 struct slave *slave;
3919
3920 ecmd->duplex = DUPLEX_UNKNOWN;
3921 ecmd->port = PORT_OTHER;
3922
3923 /* Since SLAVE_IS_OK returns false for all inactive or down slaves, we
3924 * do not need to check mode. Though link speed might not represent
3925 	 * the true receive or transmit bandwidth (not all modes are symmetric),
3926 * this is an accurate maximum.
3927 */
3928 read_lock(&bond->lock);
3929 bond_for_each_slave(bond, slave) {
3930 if (SLAVE_IS_OK(slave)) {
3931 if (slave->speed != SPEED_UNKNOWN)
3932 speed += slave->speed;
3933 if (ecmd->duplex == DUPLEX_UNKNOWN &&
3934 slave->duplex != DUPLEX_UNKNOWN)
3935 ecmd->duplex = slave->duplex;
3936 }
3937 }
3938 ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
3939 read_unlock(&bond->lock);
3940
3941 return 0;
3942 }
3943
3944 static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
3945 struct ethtool_drvinfo *drvinfo)
3946 {
3947 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
3948 strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
3949 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
3950 BOND_ABI_VERSION);
3951 }
3952
3953 static const struct ethtool_ops bond_ethtool_ops = {
3954 .get_drvinfo = bond_ethtool_get_drvinfo,
3955 .get_settings = bond_ethtool_get_settings,
3956 .get_link = ethtool_op_get_link,
3957 };
3958
3959 static const struct net_device_ops bond_netdev_ops = {
3960 .ndo_init = bond_init,
3961 .ndo_uninit = bond_uninit,
3962 .ndo_open = bond_open,
3963 .ndo_stop = bond_close,
3964 .ndo_start_xmit = bond_start_xmit,
3965 .ndo_select_queue = bond_select_queue,
3966 .ndo_get_stats64 = bond_get_stats,
3967 .ndo_do_ioctl = bond_do_ioctl,
3968 .ndo_change_rx_flags = bond_change_rx_flags,
3969 .ndo_set_rx_mode = bond_set_rx_mode,
3970 .ndo_change_mtu = bond_change_mtu,
3971 .ndo_set_mac_address = bond_set_mac_address,
3972 .ndo_neigh_setup = bond_neigh_setup,
3973 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
3974 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
3975 #ifdef CONFIG_NET_POLL_CONTROLLER
3976 .ndo_netpoll_setup = bond_netpoll_setup,
3977 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
3978 .ndo_poll_controller = bond_poll_controller,
3979 #endif
3980 .ndo_add_slave = bond_enslave,
3981 .ndo_del_slave = bond_release,
3982 .ndo_fix_features = bond_fix_features,
3983 };
3984
3985 static const struct device_type bond_type = {
3986 .name = "bond",
3987 };
3988
3989 static void bond_destructor(struct net_device *bond_dev)
3990 {
3991 struct bonding *bond = netdev_priv(bond_dev);
3992 if (bond->wq)
3993 destroy_workqueue(bond->wq);
3994 free_netdev(bond_dev);
3995 }
3996
3997 static void bond_setup(struct net_device *bond_dev)
3998 {
3999 struct bonding *bond = netdev_priv(bond_dev);
4000
4001 /* initialize rwlocks */
4002 rwlock_init(&bond->lock);
4003 rwlock_init(&bond->curr_slave_lock);
4004 INIT_LIST_HEAD(&bond->slave_list);
4005 bond->params = bonding_defaults;
4006
4007 /* Initialize pointers */
4008 bond->dev = bond_dev;
4009
4010 /* Initialize the device entry points */
4011 ether_setup(bond_dev);
4012 bond_dev->netdev_ops = &bond_netdev_ops;
4013 bond_dev->ethtool_ops = &bond_ethtool_ops;
4014 bond_set_mode_ops(bond, bond->params.mode);
4015
4016 bond_dev->destructor = bond_destructor;
4017
4018 SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
4019
4020 /* Initialize the device options */
4021 bond_dev->tx_queue_len = 0;
4022 bond_dev->flags |= IFF_MASTER|IFF_MULTICAST;
4023 bond_dev->priv_flags |= IFF_BONDING;
4024 bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
4025
4026 /* At first, we block adding VLANs. That's the only way to
4027 * prevent problems that occur when adding VLANs over an
4028 * empty bond. The block will be removed once non-challenged
4029 * slaves are enslaved.
4030 */
4031 bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
4032
4033 /* don't acquire bond device's netif_tx_lock when
4034 * transmitting */
4035 bond_dev->features |= NETIF_F_LLTX;
4036
4037 /* By default, we declare the bond to be fully
4038 * VLAN hardware accelerated capable. Special
4039 * care is taken in the various xmit functions
4040 * when there are slaves that are not hw accel
4041 * capable
4042 */
4043
4044 bond_dev->hw_features = BOND_VLAN_FEATURES |
4045 NETIF_F_HW_VLAN_CTAG_TX |
4046 NETIF_F_HW_VLAN_CTAG_RX |
4047 NETIF_F_HW_VLAN_CTAG_FILTER;
4048
4049 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
4050 bond_dev->features |= bond_dev->hw_features;
4051 }
4052
4053 /*
4054 * Destroy a bonding device.
4055 * Must be under rtnl_lock when this function is called.
4056 */
4057 static void bond_uninit(struct net_device *bond_dev)
4058 {
4059 struct bonding *bond = netdev_priv(bond_dev);
4060 struct slave *slave, *tmp_slave;
4061
4062 bond_netpoll_cleanup(bond_dev);
4063
4064 /* Release the bonded slaves */
4065 list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
4066 __bond_release_one(bond_dev, slave->dev, true);
4067 pr_info("%s: released all slaves\n", bond_dev->name);
4068
4069 list_del(&bond->bond_list);
4070
4071 bond_debug_unregister(bond);
4072 }
4073
4074 /*------------------------- Module initialization ---------------------------*/
4075
4076 /*
4077 * Convert string input module parms. Accept either the
4078 * number of the mode or its string name. A bit complicated because
4079 * some mode names are substrings of other names, and calls from sysfs
4080 * may have whitespace in the name (trailing newlines, for example).
4081 */
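/* For example, with the standard mode table both "802.3ad" and "4" resolve
 * to BOND_MODE_8023AD, while an unrecognized string returns -1 (illustrative;
 * the exact values depend on the bond_parm_tbl passed in).
 */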
4082 int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
4083 {
4084 int modeint = -1, i, rv;
4085 char *p, modestr[BOND_MAX_MODENAME_LEN + 1] = { 0, };
4086
4087 for (p = (char *)buf; *p; p++)
4088 if (!(isdigit(*p) || isspace(*p)))
4089 break;
4090
4091 if (*p)
4092 rv = sscanf(buf, "%20s", modestr);
4093 else
4094 rv = sscanf(buf, "%d", &modeint);
4095
4096 if (!rv)
4097 return -1;
4098
4099 for (i = 0; tbl[i].modename; i++) {
4100 if (modeint == tbl[i].mode)
4101 return tbl[i].mode;
4102 if (strcmp(modestr, tbl[i].modename) == 0)
4103 return tbl[i].mode;
4104 }
4105
4106 return -1;
4107 }
4108
4109 static int bond_check_params(struct bond_params *params)
4110 {
4111 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4112 int arp_all_targets_value;
4113
4114 /*
4115 * Convert string parameters.
4116 */
4117 if (mode) {
4118 bond_mode = bond_parse_parm(mode, bond_mode_tbl);
4119 if (bond_mode == -1) {
4120 pr_err("Error: Invalid bonding mode \"%s\"\n",
4121 mode == NULL ? "NULL" : mode);
4122 return -EINVAL;
4123 }
4124 }
4125
4126 if (xmit_hash_policy) {
4127 if ((bond_mode != BOND_MODE_XOR) &&
4128 (bond_mode != BOND_MODE_8023AD)) {
4129 pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
4130 bond_mode_name(bond_mode));
4131 } else {
4132 xmit_hashtype = bond_parse_parm(xmit_hash_policy,
4133 xmit_hashtype_tbl);
4134 if (xmit_hashtype == -1) {
4135 pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
4136 xmit_hash_policy == NULL ? "NULL" :
4137 xmit_hash_policy);
4138 return -EINVAL;
4139 }
4140 }
4141 }
4142
4143 if (lacp_rate) {
4144 if (bond_mode != BOND_MODE_8023AD) {
4145 pr_info("lacp_rate param is irrelevant in mode %s\n",
4146 bond_mode_name(bond_mode));
4147 } else {
4148 lacp_fast = bond_parse_parm(lacp_rate, bond_lacp_tbl);
4149 if (lacp_fast == -1) {
4150 pr_err("Error: Invalid lacp rate \"%s\"\n",
4151 lacp_rate == NULL ? "NULL" : lacp_rate);
4152 return -EINVAL;
4153 }
4154 }
4155 }
4156
4157 if (ad_select) {
4158 params->ad_select = bond_parse_parm(ad_select, ad_select_tbl);
4159 if (params->ad_select == -1) {
4160 pr_err("Error: Invalid ad_select \"%s\"\n",
4161 ad_select == NULL ? "NULL" : ad_select);
4162 return -EINVAL;
4163 }
4164
4165 if (bond_mode != BOND_MODE_8023AD) {
4166 pr_warning("ad_select param only affects 802.3ad mode\n");
4167 }
4168 } else {
4169 params->ad_select = BOND_AD_STABLE;
4170 }
4171
4172 if (max_bonds < 0) {
4173 pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
4174 max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
4175 max_bonds = BOND_DEFAULT_MAX_BONDS;
4176 }
4177
4178 if (miimon < 0) {
4179 pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to %d\n",
4180 miimon, INT_MAX, BOND_LINK_MON_INTERV);
4181 miimon = BOND_LINK_MON_INTERV;
4182 }
4183
4184 if (updelay < 0) {
4185 pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4186 updelay, INT_MAX);
4187 updelay = 0;
4188 }
4189
4190 if (downdelay < 0) {
4191 pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
4192 downdelay, INT_MAX);
4193 downdelay = 0;
4194 }
4195
4196 if ((use_carrier != 0) && (use_carrier != 1)) {
4197 pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
4198 use_carrier);
4199 use_carrier = 1;
4200 }
4201
4202 if (num_peer_notif < 0 || num_peer_notif > 255) {
4203 pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
4204 num_peer_notif);
4205 num_peer_notif = 1;
4206 }
4207
4208 /* reset values for 802.3ad */
4209 if (bond_mode == BOND_MODE_8023AD) {
4210 if (!miimon) {
4211 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
4212 pr_warning("Forcing miimon to 100msec\n");
4213 miimon = 100;
4214 }
4215 }
4216
4217 if (tx_queues < 1 || tx_queues > 255) {
4218 pr_warning("Warning: tx_queues (%d) should be between "
4219 "1 and 255, resetting to %d\n",
4220 tx_queues, BOND_DEFAULT_TX_QUEUES);
4221 tx_queues = BOND_DEFAULT_TX_QUEUES;
4222 }
4223
4224 if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
4225 pr_warning("Warning: all_slaves_active module parameter (%d), "
4226 "not of valid value (0/1), so it was set to "
4227 "0\n", all_slaves_active);
4228 all_slaves_active = 0;
4229 }
4230
4231 if (resend_igmp < 0 || resend_igmp > 255) {
4232 pr_warning("Warning: resend_igmp (%d) should be between "
4233 "0 and 255, resetting to %d\n",
4234 resend_igmp, BOND_DEFAULT_RESEND_IGMP);
4235 resend_igmp = BOND_DEFAULT_RESEND_IGMP;
4236 }
4237
4238 /* reset values for TLB/ALB */
4239 if ((bond_mode == BOND_MODE_TLB) ||
4240 (bond_mode == BOND_MODE_ALB)) {
4241 if (!miimon) {
4242 pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure and link speed which are essential for TLB/ALB load balancing\n");
4243 pr_warning("Forcing miimon to 100msec\n");
4244 miimon = 100;
4245 }
4246 }
4247
4248 if (bond_mode == BOND_MODE_ALB) {
4249 pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
4250 updelay);
4251 }
4252
4253 if (!miimon) {
4254 if (updelay || downdelay) {
4255 /* just warn the user the up/down delay will have
4256 * no effect since miimon is zero...
4257 */
4258 pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
4259 updelay, downdelay);
4260 }
4261 } else {
4262 /* don't allow arp monitoring */
4263 if (arp_interval) {
4264 pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
4265 miimon, arp_interval);
4266 arp_interval = 0;
4267 }
4268
4269 if ((updelay % miimon) != 0) {
4270 pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
4271 updelay, miimon,
4272 (updelay / miimon) * miimon);
4273 }
4274
4275 updelay /= miimon;
4276
4277 if ((downdelay % miimon) != 0) {
4278 pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
4279 downdelay, miimon,
4280 (downdelay / miimon) * miimon);
4281 }
4282
4283 downdelay /= miimon;
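		/* From here on, updelay and downdelay are stored as a number
		 * of miimon intervals (ticks), not milliseconds.
		 */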
4284 }
4285
4286 if (arp_interval < 0) {
4287 pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to %d\n",
4288 arp_interval, INT_MAX, BOND_LINK_ARP_INTERV);
4289 arp_interval = BOND_LINK_ARP_INTERV;
4290 }
4291
4292 for (arp_ip_count = 0, i = 0;
4293 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
4294 /* not complete check, but should be good enough to
4295 catch mistakes */
4296 __be32 ip = in_aton(arp_ip_target[i]);
4297 if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
4298 ip == htonl(INADDR_BROADCAST)) {
4299 pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
4300 arp_ip_target[i]);
4301 arp_interval = 0;
4302 } else {
4303 if (bond_get_targets_ip(arp_target, ip) == -1)
4304 arp_target[arp_ip_count++] = ip;
4305 else
4306 pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
4307 &ip);
4308 }
4309 }
4310
4311 if (arp_interval && !arp_ip_count) {
4312 /* don't allow arping if no arp_ip_target given... */
4313 pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
4314 arp_interval);
4315 arp_interval = 0;
4316 }
4317
4318 if (arp_validate) {
4319 if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
4320 pr_err("arp_validate only supported in active-backup mode\n");
4321 return -EINVAL;
4322 }
4323 if (!arp_interval) {
4324 pr_err("arp_validate requires arp_interval\n");
4325 return -EINVAL;
4326 }
4327
4328 arp_validate_value = bond_parse_parm(arp_validate,
4329 arp_validate_tbl);
4330 if (arp_validate_value == -1) {
4331 pr_err("Error: invalid arp_validate \"%s\"\n",
4332 arp_validate == NULL ? "NULL" : arp_validate);
4333 return -EINVAL;
4334 }
4335 } else
4336 arp_validate_value = 0;
4337
4338 arp_all_targets_value = 0;
4339 if (arp_all_targets) {
4340 arp_all_targets_value = bond_parse_parm(arp_all_targets,
4341 arp_all_targets_tbl);
4342
4343 if (arp_all_targets_value == -1) {
4344 pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
4345 arp_all_targets);
4346 arp_all_targets_value = 0;
4347 }
4348 }
4349
4350 if (miimon) {
4351 pr_info("MII link monitoring set to %d ms\n", miimon);
4352 } else if (arp_interval) {
4353 pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
4354 arp_interval,
4355 arp_validate_tbl[arp_validate_value].modename,
4356 arp_ip_count);
4357
4358 for (i = 0; i < arp_ip_count; i++)
4359 pr_info(" %s", arp_ip_target[i]);
4360
4361 pr_info("\n");
4362
4363 } else if (max_bonds) {
4364 /* miimon and arp_interval not set, we need one so things
4365 * work as expected, see bonding.txt for details
4366 */
4367 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
4368 }
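	/* Illustrative module load (hypothetical values): "modprobe bonding
	 * mode=active-backup miimon=100" takes the first branch above and logs
	 * the MII interval; if both miimon and arp_interval are given, MII
	 * monitoring wins and arp_interval is cleared earlier.
	 */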
4369
4370 if (primary && !USES_PRIMARY(bond_mode)) {
4371 /* currently, using a primary only makes sense
4372 * in active backup, TLB or ALB modes
4373 */
4374 pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
4375 primary, bond_mode_name(bond_mode));
4376 primary = NULL;
4377 }
4378
4379 if (primary && primary_reselect) {
4380 primary_reselect_value = bond_parse_parm(primary_reselect,
4381 pri_reselect_tbl);
4382 if (primary_reselect_value == -1) {
4383 pr_err("Error: Invalid primary_reselect \"%s\"\n",
4384 primary_reselect ==
4385 NULL ? "NULL" : primary_reselect);
4386 return -EINVAL;
4387 }
4388 } else {
4389 primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
4390 }
4391
4392 if (fail_over_mac) {
4393 fail_over_mac_value = bond_parse_parm(fail_over_mac,
4394 fail_over_mac_tbl);
4395 if (fail_over_mac_value == -1) {
4396 pr_err("Error: invalid fail_over_mac \"%s\"\n",
4397 			       fail_over_mac == NULL ? "NULL" : fail_over_mac);
4398 return -EINVAL;
4399 }
4400
4401 if (bond_mode != BOND_MODE_ACTIVEBACKUP)
4402 pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
4403 } else {
4404 fail_over_mac_value = BOND_FOM_NONE;
4405 }
4406
4407 /* fill params struct with the proper values */
4408 params->mode = bond_mode;
4409 params->xmit_policy = xmit_hashtype;
4410 params->miimon = miimon;
4411 params->num_peer_notif = num_peer_notif;
4412 params->arp_interval = arp_interval;
4413 params->arp_validate = arp_validate_value;
4414 params->arp_all_targets = arp_all_targets_value;
4415 params->updelay = updelay;
4416 params->downdelay = downdelay;
4417 params->use_carrier = use_carrier;
4418 params->lacp_fast = lacp_fast;
4419 params->primary[0] = 0;
4420 params->primary_reselect = primary_reselect_value;
4421 params->fail_over_mac = fail_over_mac_value;
4422 params->tx_queues = tx_queues;
4423 params->all_slaves_active = all_slaves_active;
4424 params->resend_igmp = resend_igmp;
4425 params->min_links = min_links;
4426 params->lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
4427
4428 if (primary) {
4429 strncpy(params->primary, primary, IFNAMSIZ);
4430 params->primary[IFNAMSIZ - 1] = 0;
4431 }
4432
4433 memcpy(params->arp_targets, arp_target, sizeof(arp_target));
4434
4435 return 0;
4436 }
4437
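/* A bond transmits through its slaves, so the master's tx and addr-list
 * locks can nest with the corresponding locks of the slave devices. Giving
 * the bond device its own lockdep classes (below) keeps lockdep from
 * reporting false recursive locking on these stacked devices.
 */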
4438 static struct lock_class_key bonding_netdev_xmit_lock_key;
4439 static struct lock_class_key bonding_netdev_addr_lock_key;
4440 static struct lock_class_key bonding_tx_busylock_key;
4441
4442 static void bond_set_lockdep_class_one(struct net_device *dev,
4443 struct netdev_queue *txq,
4444 void *_unused)
4445 {
4446 lockdep_set_class(&txq->_xmit_lock,
4447 &bonding_netdev_xmit_lock_key);
4448 }
4449
4450 static void bond_set_lockdep_class(struct net_device *dev)
4451 {
4452 lockdep_set_class(&dev->addr_list_lock,
4453 &bonding_netdev_addr_lock_key);
4454 netdev_for_each_tx_queue(dev, bond_set_lockdep_class_one, NULL);
4455 dev->qdisc_tx_busylock = &bonding_tx_busylock_key;
4456 }
4457
4458 /*
4459 * Called from registration process
4460 */
4461 static int bond_init(struct net_device *bond_dev)
4462 {
4463 struct bonding *bond = netdev_priv(bond_dev);
4464 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
4465 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
4466
4467 pr_debug("Begin bond_init for %s\n", bond_dev->name);
4468
4469 /*
4470 * Initialize locks that may be required during
4471 * en/deslave operations. All of the bond_open work
4472 * (of which this is part) should really be moved to
4473 * a phase prior to dev_open
4474 */
4475 spin_lock_init(&(bond_info->tx_hashtbl_lock));
4476 spin_lock_init(&(bond_info->rx_hashtbl_lock));
4477
4478 bond->wq = create_singlethread_workqueue(bond_dev->name);
4479 if (!bond->wq)
4480 return -ENOMEM;
4481
4482 bond_set_lockdep_class(bond_dev);
4483
4484 list_add_tail(&bond->bond_list, &bn->dev_list);
4485
4486 bond_prepare_sysfs_group(bond);
4487
4488 bond_debug_register(bond);
4489
4490 /* Ensure valid dev_addr */
4491 if (is_zero_ether_addr(bond_dev->dev_addr) &&
4492 bond_dev->addr_assign_type == NET_ADDR_PERM)
4493 eth_hw_addr_random(bond_dev);
4494
4495 return 0;
4496 }
4497
4498 static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
4499 {
4500 if (tb[IFLA_ADDRESS]) {
4501 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
4502 return -EINVAL;
4503 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
4504 return -EADDRNOTAVAIL;
4505 }
4506 return 0;
4507 }
4508
4509 static unsigned int bond_get_num_tx_queues(void)
4510 {
4511 return tx_queues;
4512 }
4513
4514 static struct rtnl_link_ops bond_link_ops __read_mostly = {
4515 .kind = "bond",
4516 .priv_size = sizeof(struct bonding),
4517 .setup = bond_setup,
4518 .validate = bond_validate,
4519 .get_num_tx_queues = bond_get_num_tx_queues,
4520 .get_num_rx_queues = bond_get_num_tx_queues, /* Use the same number
4521 as for TX queues */
4522 };
4523
4524 /* Create a new bond based on the specified name and bonding parameters.
4525 * If name is NULL, obtain a suitable "bond%d" name for us.
4526 * Caller must NOT hold rtnl_lock; we need to release it here before we
4527 * set up our sysfs entries.
4528 */
4529 int bond_create(struct net *net, const char *name)
4530 {
4531 struct net_device *bond_dev;
4532 int res;
4533
4534 rtnl_lock();
4535
4536 bond_dev = alloc_netdev_mq(sizeof(struct bonding),
4537 name ? name : "bond%d",
4538 bond_setup, tx_queues);
4539 if (!bond_dev) {
4540 pr_err("%s: eek! can't alloc netdev!\n", name);
4541 rtnl_unlock();
4542 return -ENOMEM;
4543 }
4544
4545 dev_net_set(bond_dev, net);
4546 bond_dev->rtnl_link_ops = &bond_link_ops;
4547
4548 res = register_netdevice(bond_dev);
4549
4550 netif_carrier_off(bond_dev);
4551
4552 rtnl_unlock();
4553 if (res < 0)
4554 bond_destructor(bond_dev);
4555 return res;
4556 }
4557
4558 static int __net_init bond_net_init(struct net *net)
4559 {
4560 struct bond_net *bn = net_generic(net, bond_net_id);
4561
4562 bn->net = net;
4563 INIT_LIST_HEAD(&bn->dev_list);
4564
4565 bond_create_proc_dir(bn);
4566 bond_create_sysfs(bn);
4567
4568 return 0;
4569 }
4570
4571 static void __net_exit bond_net_exit(struct net *net)
4572 {
4573 struct bond_net *bn = net_generic(net, bond_net_id);
4574 struct bonding *bond, *tmp_bond;
4575 LIST_HEAD(list);
4576
4577 bond_destroy_sysfs(bn);
4578 bond_destroy_proc_dir(bn);
4579
4580 /* Kill off any bonds created after unregistering bond rtnl ops */
4581 rtnl_lock();
4582 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
4583 unregister_netdevice_queue(bond->dev, &list);
4584 unregister_netdevice_many(&list);
4585 rtnl_unlock();
4586 }
4587
4588 static struct pernet_operations bond_net_ops = {
4589 .init = bond_net_init,
4590 .exit = bond_net_exit,
4591 .id = &bond_net_id,
4592 .size = sizeof(struct bond_net),
4593 };
4594
4595 static int __init bonding_init(void)
4596 {
4597 int i;
4598 int res;
4599
4600 pr_info("%s", bond_version);
4601
4602 res = bond_check_params(&bonding_defaults);
4603 if (res)
4604 goto out;
4605
4606 res = register_pernet_subsys(&bond_net_ops);
4607 if (res)
4608 goto out;
4609
4610 res = rtnl_link_register(&bond_link_ops);
4611 if (res)
4612 goto err_link;
4613
4614 bond_create_debugfs();
4615
4616 for (i = 0; i < max_bonds; i++) {
4617 res = bond_create(&init_net, NULL);
4618 if (res)
4619 goto err;
4620 }
4621
4622 register_netdevice_notifier(&bond_netdev_notifier);
4623 out:
4624 return res;
4625 err:
4626 rtnl_link_unregister(&bond_link_ops);
4627 err_link:
4628 unregister_pernet_subsys(&bond_net_ops);
4629 goto out;
4630
4631 }
4632
4633 static void __exit bonding_exit(void)
4634 {
4635 unregister_netdevice_notifier(&bond_netdev_notifier);
4636
4637 bond_destroy_debugfs();
4638
4639 rtnl_link_unregister(&bond_link_ops);
4640 unregister_pernet_subsys(&bond_net_ops);
4641
4642 #ifdef CONFIG_NET_POLL_CONTROLLER
4643 /*
4644 * Make sure we don't have an imbalance on our netpoll blocking
4645 */
4646 WARN_ON(atomic_read(&netpoll_block_tx));
4647 #endif
4648 }
4649
4650 module_init(bonding_init);
4651 module_exit(bonding_exit);
4652 MODULE_LICENSE("GPL");
4653 MODULE_VERSION(DRV_VERSION);
4654 MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
4655 MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
4656 MODULE_ALIAS_RTNL_LINK("bond");