Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1/*
2 * originally based on the dummy device.
3 *
4 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
5 * Licensed under the GPL. Based on dummy.c, and eql.c devices.
6 *
7 * bonding.c: an Ethernet Bonding driver
8 *
9 * This is useful for talking to Cisco EtherChannel compatible equipment:
10 * Cisco 5500
11 * Sun Trunking (Solaris)
12 * Alteon AceDirector Trunks
13 * Linux Bonding
14 * and probably many L2 switches ...
15 *
16 * How it works:
17 * ifconfig bond0 ipaddress netmask up
18 * will set up a network device with an ip address. No mac address
19 * will be assigned at this time. The hw mac address will come from
20 * the first slave bonded to the channel. All slaves will then use
21 * this hw mac address.
22 *
23 * ifconfig bond0 down
24 * will release all slaves, marking them as down.
25 *
26 * ifenslave bond0 eth0
27 * will attach eth0 to bond0 as a slave. eth0's hw mac address will either:
28 * a: be used as the bond's initial mac address, or
29 * b: if bond0 already has a hw mac address, be overwritten with
30 * bond0's address.
31 *
32 */
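/* A rough iproute2 equivalent of the ifconfig/ifenslave commands above
 * (assuming eth0 as the slave and active-backup as the desired mode):
 *
 *	ip link add bond0 type bond mode active-backup
 *	ip link set eth0 down
 *	ip link set eth0 master bond0
 *	ip link set bond0 up
 *
 * The mode and option names correspond to the module parameters declared
 * below.
 */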
33
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/types.h>
37#include <linux/fcntl.h>
38#include <linux/interrupt.h>
39#include <linux/ptrace.h>
40#include <linux/ioport.h>
41#include <linux/in.h>
42#include <net/ip.h>
43#include <linux/ip.h>
44#include <linux/icmp.h>
45#include <linux/icmpv6.h>
46#include <linux/tcp.h>
47#include <linux/udp.h>
48#include <linux/slab.h>
49#include <linux/string.h>
50#include <linux/init.h>
51#include <linux/timer.h>
52#include <linux/socket.h>
53#include <linux/ctype.h>
54#include <linux/inet.h>
55#include <linux/bitops.h>
56#include <linux/io.h>
57#include <asm/dma.h>
58#include <linux/uaccess.h>
59#include <linux/errno.h>
60#include <linux/netdevice.h>
61#include <linux/inetdevice.h>
62#include <linux/igmp.h>
63#include <linux/etherdevice.h>
64#include <linux/skbuff.h>
65#include <net/sock.h>
66#include <linux/rtnetlink.h>
67#include <linux/smp.h>
68#include <linux/if_ether.h>
69#include <net/arp.h>
70#include <linux/mii.h>
71#include <linux/ethtool.h>
72#include <linux/if_vlan.h>
73#include <linux/if_bonding.h>
74#include <linux/jiffies.h>
75#include <linux/preempt.h>
76#include <net/route.h>
77#include <net/net_namespace.h>
78#include <net/netns/generic.h>
79#include <net/pkt_sched.h>
80#include <linux/rculist.h>
81#include <net/flow_dissector.h>
82#include <net/xfrm.h>
83#include <net/bonding.h>
84#include <net/bond_3ad.h>
85#include <net/bond_alb.h>
86#if IS_ENABLED(CONFIG_TLS_DEVICE)
87#include <net/tls.h>
88#endif
89
90#include "bonding_priv.h"
91
92/*---------------------------- Module parameters ----------------------------*/
93
94/* monitor all links that often (in milliseconds). <=0 disables monitoring */
95
96static int max_bonds = BOND_DEFAULT_MAX_BONDS;
97static int tx_queues = BOND_DEFAULT_TX_QUEUES;
98static int num_peer_notif = 1;
99static int miimon;
100static int updelay;
101static int downdelay;
102static int use_carrier = 1;
103static char *mode;
104static char *primary;
105static char *primary_reselect;
106static char *lacp_rate;
107static int min_links;
108static char *ad_select;
109static char *xmit_hash_policy;
110static int arp_interval;
111static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
112static char *arp_validate;
113static char *arp_all_targets;
114static char *fail_over_mac;
115static int all_slaves_active;
116static struct bond_params bonding_defaults;
117static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
118static int packets_per_slave = 1;
119static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
120
121module_param(max_bonds, int, 0);
122MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
123module_param(tx_queues, int, 0);
124MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
125module_param_named(num_grat_arp, num_peer_notif, int, 0644);
126MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
127 "failover event (alias of num_unsol_na)");
128module_param_named(num_unsol_na, num_peer_notif, int, 0644);
129MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
130 "failover event (alias of num_grat_arp)");
131module_param(miimon, int, 0);
132MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
133module_param(updelay, int, 0);
134MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
135module_param(downdelay, int, 0);
136MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
137 "in milliseconds");
138module_param(use_carrier, int, 0);
139MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
140 "0 for off, 1 for on (default)");
141module_param(mode, charp, 0);
142MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
143 "1 for active-backup, 2 for balance-xor, "
144 "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
145 "6 for balance-alb");
146module_param(primary, charp, 0);
147MODULE_PARM_DESC(primary, "Primary network device to use");
148module_param(primary_reselect, charp, 0);
149MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
150 "once it comes up; "
151 "0 for always (default), "
152 "1 for only if speed of primary is "
153 "better, "
154 "2 for only on active slave "
155 "failure");
156module_param(lacp_rate, charp, 0);
157MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
158 "0 for slow, 1 for fast");
159module_param(ad_select, charp, 0);
160MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
161 "0 for stable (default), 1 for bandwidth, "
162 "2 for count");
163module_param(min_links, int, 0);
164MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");
165
166module_param(xmit_hash_policy, charp, 0);
167MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
168 "0 for layer 2 (default), 1 for layer 3+4, "
169 "2 for layer 2+3, 3 for encap layer 2+3, "
170 "4 for encap layer 3+4, 5 for vlan+srcmac");
171module_param(arp_interval, int, 0);
172MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
173module_param_array(arp_ip_target, charp, NULL, 0);
174MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
175module_param(arp_validate, charp, 0);
176MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
177 "0 for none (default), 1 for active, "
178 "2 for backup, 3 for all");
179module_param(arp_all_targets, charp, 0);
180MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
181module_param(fail_over_mac, charp, 0);
182MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
183 "the same MAC; 0 for none (default), "
184 "1 for active, 2 for follow");
185module_param(all_slaves_active, int, 0);
186MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
187 "by setting active flag for all slaves; "
188 "0 for never (default), 1 for always.");
189module_param(resend_igmp, int, 0);
190MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
191 "link failure");
192module_param(packets_per_slave, int, 0);
193MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
194 "mode; 0 for a random slave, 1 packet per "
195 "slave (default), >1 packets per slave.");
196module_param(lp_interval, uint, 0);
197MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
198 "the bonding driver sends learning packets to "
199 "each slaves peer switch. The default is 1.");
200
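/* A minimal usage sketch for the parameters above (assuming the standard
 * module-parameter and sysfs interfaces; device names are illustrative):
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 *	echo 200 > /sys/class/net/bond0/bonding/miimon
 *
 * Module parameters only provide the defaults for newly created bonds;
 * per-bond values can be changed later through the sysfs attributes under
 * /sys/class/net/<bond>/bonding/.
 */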
201/*----------------------------- Global variables ----------------------------*/
202
203#ifdef CONFIG_NET_POLL_CONTROLLER
204atomic_t netpoll_block_tx = ATOMIC_INIT(0);
205#endif
206
207unsigned int bond_net_id __read_mostly;
208
209static const struct flow_dissector_key flow_keys_bonding_keys[] = {
210 {
211 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
212 .offset = offsetof(struct flow_keys, control),
213 },
214 {
215 .key_id = FLOW_DISSECTOR_KEY_BASIC,
216 .offset = offsetof(struct flow_keys, basic),
217 },
218 {
219 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
220 .offset = offsetof(struct flow_keys, addrs.v4addrs),
221 },
222 {
223 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
224 .offset = offsetof(struct flow_keys, addrs.v6addrs),
225 },
226 {
227 .key_id = FLOW_DISSECTOR_KEY_TIPC,
228 .offset = offsetof(struct flow_keys, addrs.tipckey),
229 },
230 {
231 .key_id = FLOW_DISSECTOR_KEY_PORTS,
232 .offset = offsetof(struct flow_keys, ports),
233 },
234 {
235 .key_id = FLOW_DISSECTOR_KEY_ICMP,
236 .offset = offsetof(struct flow_keys, icmp),
237 },
238 {
239 .key_id = FLOW_DISSECTOR_KEY_VLAN,
240 .offset = offsetof(struct flow_keys, vlan),
241 },
242 {
243 .key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
244 .offset = offsetof(struct flow_keys, tags),
245 },
246 {
247 .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
248 .offset = offsetof(struct flow_keys, keyid),
249 },
250};
251
252static struct flow_dissector flow_keys_bonding __read_mostly;
253
254/*-------------------------- Forward declarations ---------------------------*/
255
256static int bond_init(struct net_device *bond_dev);
257static void bond_uninit(struct net_device *bond_dev);
258static void bond_get_stats(struct net_device *bond_dev,
259 struct rtnl_link_stats64 *stats);
260static void bond_slave_arr_handler(struct work_struct *work);
261static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
262 int mod);
263static void bond_netdev_notify_work(struct work_struct *work);
264
265/*---------------------------- General routines -----------------------------*/
266
267const char *bond_mode_name(int mode)
268{
269 static const char *names[] = {
270 [BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
271 [BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
272 [BOND_MODE_XOR] = "load balancing (xor)",
273 [BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
274 [BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
275 [BOND_MODE_TLB] = "transmit load balancing",
276 [BOND_MODE_ALB] = "adaptive load balancing",
277 };
278
279 if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
280 return "unknown";
281
282 return names[mode];
283}
284
285/**
286 * bond_dev_queue_xmit - Prepare skb for xmit.
287 *
288 * @bond: bond device that got this skb for tx.
289 * @skb: hw accel VLAN tagged skb to transmit
290 * @slave_dev: slave that is supposed to xmit this skbuff
291 */
292netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
293 struct net_device *slave_dev)
294{
295 skb->dev = slave_dev;
296
297 BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
298 sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
299 skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
300
301 if (unlikely(netpoll_tx_running(bond->dev)))
302 return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
303
304 return dev_queue_xmit(skb);
305}
306
307bool bond_sk_check(struct bonding *bond)
308{
309 switch (BOND_MODE(bond)) {
310 case BOND_MODE_8023AD:
311 case BOND_MODE_XOR:
312 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
313 return true;
314 fallthrough;
315 default:
316 return false;
317 }
318}
319
320/*---------------------------------- VLAN -----------------------------------*/
321
322/* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
323 * we don't protect the slave list iteration with a lock because:
324 * a. This operation is performed in IOCTL context,
325 * b. The operation is protected by the RTNL semaphore in the 8021q code,
326 * c. Holding a lock with BH disabled while directly calling a base driver
327 * entry point is generally a BAD idea.
328 *
329 * The design of synchronization/protection for this operation in the 8021q
330 * module is good for one or more VLAN devices over a single physical device
331 * and cannot be extended for a teaming solution like bonding, so there is a
332 * potential race condition here where a net device from the vlan group might
333 * be referenced (either by a base driver or the 8021q code) while it is being
334 * removed from the system. However, it turns out we're not making matters
335 * worse, and if it works for regular VLAN usage it will work here too.
336*/
337
338/**
339 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
340 * @bond_dev: bonding net device that got called
341 * @proto: network protocol ID
342 * @vid: vlan id being added
343 */
344static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
345 __be16 proto, u16 vid)
346{
347 struct bonding *bond = netdev_priv(bond_dev);
348 struct slave *slave, *rollback_slave;
349 struct list_head *iter;
350 int res;
351
352 bond_for_each_slave(bond, slave, iter) {
353 res = vlan_vid_add(slave->dev, proto, vid);
354 if (res)
355 goto unwind;
356 }
357
358 return 0;
359
360unwind:
361 /* unwind to the slave that failed */
362 bond_for_each_slave(bond, rollback_slave, iter) {
363 if (rollback_slave == slave)
364 break;
365
366 vlan_vid_del(rollback_slave->dev, proto, vid);
367 }
368
369 return res;
370}
371
372/**
373 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
374 * @bond_dev: bonding net device that got called
375 * @proto: network protocol ID
376 * @vid: vlan id being removed
377 */
378static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
379 __be16 proto, u16 vid)
380{
381 struct bonding *bond = netdev_priv(bond_dev);
382 struct list_head *iter;
383 struct slave *slave;
384
385 bond_for_each_slave(bond, slave, iter)
386 vlan_vid_del(slave->dev, proto, vid);
387
388 if (bond_is_lb(bond))
389 bond_alb_clear_vlan(bond, vid);
390
391 return 0;
392}
393
394/*---------------------------------- XFRM -----------------------------------*/
395
396#ifdef CONFIG_XFRM_OFFLOAD
397/**
398 * bond_ipsec_add_sa - program device with a security association
399 * @xs: pointer to transformer state struct
400 **/
401static int bond_ipsec_add_sa(struct xfrm_state *xs)
402{
403 struct net_device *bond_dev = xs->xso.dev;
404 struct bonding *bond;
405 struct slave *slave;
406
407 if (!bond_dev)
408 return -EINVAL;
409
410 bond = netdev_priv(bond_dev);
411 slave = rcu_dereference(bond->curr_active_slave);
412 xs->xso.real_dev = slave->dev;
413 bond->xs = xs;
414
415 if (!(slave->dev->xfrmdev_ops
416 && slave->dev->xfrmdev_ops->xdo_dev_state_add)) {
417 slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
418 return -EINVAL;
419 }
420
421 return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
422}
423
424/**
425 * bond_ipsec_del_sa - clear out this specific SA
426 * @xs: pointer to transformer state struct
427 **/
428static void bond_ipsec_del_sa(struct xfrm_state *xs)
429{
430 struct net_device *bond_dev = xs->xso.dev;
431 struct bonding *bond;
432 struct slave *slave;
433
434 if (!bond_dev)
435 return;
436
437 bond = netdev_priv(bond_dev);
438 slave = rcu_dereference(bond->curr_active_slave);
439
440 if (!slave)
441 return;
442
443 xs->xso.real_dev = slave->dev;
444
445 if (!(slave->dev->xfrmdev_ops
446 && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) {
447 slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
448 return;
449 }
450
451 slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
452}
453
454/**
455 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
456 * @skb: current data packet
457 * @xs: pointer to transformer state struct
458 **/
459static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
460{
461 struct net_device *bond_dev = xs->xso.dev;
462 struct bonding *bond = netdev_priv(bond_dev);
463 struct slave *curr_active = rcu_dereference(bond->curr_active_slave);
464 struct net_device *slave_dev = curr_active->dev;
465
466 if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
467 return true;
468
469 if (!(slave_dev->xfrmdev_ops
470 && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) {
471 slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__);
472 return false;
473 }
474
475 xs->xso.real_dev = slave_dev;
476 return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
477}
478
479static const struct xfrmdev_ops bond_xfrmdev_ops = {
480 .xdo_dev_state_add = bond_ipsec_add_sa,
481 .xdo_dev_state_delete = bond_ipsec_del_sa,
482 .xdo_dev_offload_ok = bond_ipsec_offload_ok,
483};
484#endif /* CONFIG_XFRM_OFFLOAD */
485
486/*------------------------------- Link status -------------------------------*/
487
488/* Set the carrier state for the master according to the state of its
489 * slaves. If any slaves are up, the master is up. In 802.3ad mode,
490 * do special 802.3ad magic.
491 *
492 * Returns zero if carrier state does not change, nonzero if it does.
493 */
494int bond_set_carrier(struct bonding *bond)
495{
496 struct list_head *iter;
497 struct slave *slave;
498
499 if (!bond_has_slaves(bond))
500 goto down;
501
502 if (BOND_MODE(bond) == BOND_MODE_8023AD)
503 return bond_3ad_set_carrier(bond);
504
505 bond_for_each_slave(bond, slave, iter) {
506 if (slave->link == BOND_LINK_UP) {
507 if (!netif_carrier_ok(bond->dev)) {
508 netif_carrier_on(bond->dev);
509 return 1;
510 }
511 return 0;
512 }
513 }
514
515down:
516 if (netif_carrier_ok(bond->dev)) {
517 netif_carrier_off(bond->dev);
518 return 1;
519 }
520 return 0;
521}
522
523/* Get link speed and duplex from the slave's base driver
524 * using ethtool. If for some reason the call fails or the
525 * values are invalid, set speed and duplex to SPEED_UNKNOWN and
526 * DUPLEX_UNKNOWN. Return 1 if speed or duplex settings are
527 * UNKNOWN; 0 otherwise.
528 */
529static int bond_update_speed_duplex(struct slave *slave)
530{
531 struct net_device *slave_dev = slave->dev;
532 struct ethtool_link_ksettings ecmd;
533 int res;
534
535 slave->speed = SPEED_UNKNOWN;
536 slave->duplex = DUPLEX_UNKNOWN;
537
538 res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
539 if (res < 0)
540 return 1;
541 if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
542 return 1;
543 switch (ecmd.base.duplex) {
544 case DUPLEX_FULL:
545 case DUPLEX_HALF:
546 break;
547 default:
548 return 1;
549 }
550
551 slave->speed = ecmd.base.speed;
552 slave->duplex = ecmd.base.duplex;
553
554 return 0;
555}
556
557static const char *bond_slave_link_status(s8 link)
558{
559 switch (link) {
560 case BOND_LINK_UP:
561 return "up";
562 case BOND_LINK_FAIL:
563 return "going down";
564 case BOND_LINK_DOWN:
565 return "down";
566 case BOND_LINK_BACK:
567 return "going back";
568 default:
569 return "unknown";
570 }
571}
572
573/* if <dev> supports MII link status reporting, check its link status.
574 *
575 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
576 * depending upon the setting of the use_carrier parameter.
577 *
578 * Return either BMSR_LSTATUS, meaning that the link is up (or we
579 * can't tell and just pretend it is), or 0, meaning that the link is
580 * down.
581 *
582 * If reporting is non-zero, instead of faking link up, return -1 if
583 * both ETHTOOL and MII ioctls fail (meaning the device does not
584 * support them). If use_carrier is set, return whatever it says.
585 * It'd be nice if there was a good way to tell if a driver supports
586 * netif_carrier, but there really isn't.
587 */
588static int bond_check_dev_link(struct bonding *bond,
589 struct net_device *slave_dev, int reporting)
590{
591 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
592 int (*ioctl)(struct net_device *, struct ifreq *, int);
593 struct ifreq ifr;
594 struct mii_ioctl_data *mii;
595
596 if (!reporting && !netif_running(slave_dev))
597 return 0;
598
599 if (bond->params.use_carrier)
600 return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;
601
602 /* Try to get link status using Ethtool first. */
603 if (slave_dev->ethtool_ops->get_link)
604 return slave_dev->ethtool_ops->get_link(slave_dev) ?
605 BMSR_LSTATUS : 0;
606
607 /* Ethtool can't be used, fallback to MII ioctls. */
608 ioctl = slave_ops->ndo_do_ioctl;
609 if (ioctl) {
610 /* TODO: set pointer to correct ioctl on a per team member
611 * basis to make this more efficient. That is, once
612 * we determine the correct ioctl, we will always
613 * call it and not the others for that team
614 * member.
615 */
616
617 /* We cannot assume that SIOCGMIIPHY will also read a
618 * register; not all network drivers (e.g., e100)
619 * support that.
620 */
621
622 /* Yes, the mii is overlaid on the ifreq.ifr_ifru */
623 strncpy(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
624 mii = if_mii(&ifr);
625 if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
626 mii->reg_num = MII_BMSR;
627 if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
628 return mii->val_out & BMSR_LSTATUS;
629 }
630 }
631
632 /* If reporting, report that either there's no dev->do_ioctl,
633 * or both SIOCGMIIREG and get_link failed (meaning that we
634 * cannot report link status). If not reporting, pretend
635 * we're ok.
636 */
637 return reporting ? -1 : BMSR_LSTATUS;
638}
639
640/*----------------------------- Multicast list ------------------------------*/
641
642/* Push the promiscuity flag down to appropriate slaves */
643static int bond_set_promiscuity(struct bonding *bond, int inc)
644{
645 struct list_head *iter;
646 int err = 0;
647
648 if (bond_uses_primary(bond)) {
649 struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
650
651 if (curr_active)
652 err = dev_set_promiscuity(curr_active->dev, inc);
653 } else {
654 struct slave *slave;
655
656 bond_for_each_slave(bond, slave, iter) {
657 err = dev_set_promiscuity(slave->dev, inc);
658 if (err)
659 return err;
660 }
661 }
662 return err;
663}
664
665/* Push the allmulti flag down to all slaves */
666static int bond_set_allmulti(struct bonding *bond, int inc)
667{
668 struct list_head *iter;
669 int err = 0;
670
671 if (bond_uses_primary(bond)) {
672 struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
673
674 if (curr_active)
675 err = dev_set_allmulti(curr_active->dev, inc);
676 } else {
677 struct slave *slave;
678
679 bond_for_each_slave(bond, slave, iter) {
680 err = dev_set_allmulti(slave->dev, inc);
681 if (err)
682 return err;
683 }
684 }
685 return err;
686}
687
688/* Retrieve the list of registered multicast addresses for the bonding
689 * device and retransmit an IGMP JOIN request to the current active
690 * slave.
691 */
692static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
693{
694 struct bonding *bond = container_of(work, struct bonding,
695 mcast_work.work);
696
697 if (!rtnl_trylock()) {
698 queue_delayed_work(bond->wq, &bond->mcast_work, 1);
699 return;
700 }
701 call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
702
703 if (bond->igmp_retrans > 1) {
704 bond->igmp_retrans--;
705 queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
706 }
707 rtnl_unlock();
708}
709
710/* Flush bond's hardware addresses from slave */
711static void bond_hw_addr_flush(struct net_device *bond_dev,
712 struct net_device *slave_dev)
713{
714 struct bonding *bond = netdev_priv(bond_dev);
715
716 dev_uc_unsync(slave_dev, bond_dev);
717 dev_mc_unsync(slave_dev, bond_dev);
718
719 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
720 /* del lacpdu mc addr from mc list */
721 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
722
723 dev_mc_del(slave_dev, lacpdu_multicast);
724 }
725}
726
727/*--------------------------- Active slave change ---------------------------*/
728
729/* Update the hardware address list and promisc/allmulti for the new and
730 * old active slaves (if any). Modes that are not using primary keep all
731 * slaves up to date at all times; only the modes that use primary need to call
732 * this function to swap these settings during a failover.
733 */
734static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
735 struct slave *old_active)
736{
737 if (old_active) {
738 if (bond->dev->flags & IFF_PROMISC)
739 dev_set_promiscuity(old_active->dev, -1);
740
741 if (bond->dev->flags & IFF_ALLMULTI)
742 dev_set_allmulti(old_active->dev, -1);
743
744 bond_hw_addr_flush(bond->dev, old_active->dev);
745 }
746
747 if (new_active) {
748 /* FIXME: Signal errors upstream. */
749 if (bond->dev->flags & IFF_PROMISC)
750 dev_set_promiscuity(new_active->dev, 1);
751
752 if (bond->dev->flags & IFF_ALLMULTI)
753 dev_set_allmulti(new_active->dev, 1);
754
755 netif_addr_lock_bh(bond->dev);
756 dev_uc_sync(new_active->dev, bond->dev);
757 dev_mc_sync(new_active->dev, bond->dev);
758 netif_addr_unlock_bh(bond->dev);
759 }
760}
761
762/**
763 * bond_set_dev_addr - clone slave's address to bond
764 * @bond_dev: bond net device
765 * @slave_dev: slave net device
766 *
767 * Should be called with RTNL held.
768 */
769static int bond_set_dev_addr(struct net_device *bond_dev,
770 struct net_device *slave_dev)
771{
772 int err;
773
774 slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
775 bond_dev, slave_dev, slave_dev->addr_len);
776 err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
777 if (err)
778 return err;
779
780 memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
781 bond_dev->addr_assign_type = NET_ADDR_STOLEN;
782 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
783 return 0;
784}
785
786static struct slave *bond_get_old_active(struct bonding *bond,
787 struct slave *new_active)
788{
789 struct slave *slave;
790 struct list_head *iter;
791
792 bond_for_each_slave(bond, slave, iter) {
793 if (slave == new_active)
794 continue;
795
796 if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
797 return slave;
798 }
799
800 return NULL;
801}
802
803/* bond_do_fail_over_mac
804 *
805 * Perform special MAC address swapping for fail_over_mac settings
806 *
807 * Called with RTNL
808 */
809static void bond_do_fail_over_mac(struct bonding *bond,
810 struct slave *new_active,
811 struct slave *old_active)
812{
813 u8 tmp_mac[MAX_ADDR_LEN];
814 struct sockaddr_storage ss;
815 int rv;
816
817 switch (bond->params.fail_over_mac) {
818 case BOND_FOM_ACTIVE:
819 if (new_active) {
820 rv = bond_set_dev_addr(bond->dev, new_active->dev);
821 if (rv)
822 slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
823 -rv);
824 }
825 break;
826 case BOND_FOM_FOLLOW:
827 /* if new_active && old_active, swap them
828 * if just old_active, do nothing (going to no active slave)
829 * if just new_active, set new_active to bond's MAC
830 */
831 if (!new_active)
832 return;
833
834 if (!old_active)
835 old_active = bond_get_old_active(bond, new_active);
836
837 if (old_active) {
838 bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
839 new_active->dev->addr_len);
840 bond_hw_addr_copy(ss.__data,
841 old_active->dev->dev_addr,
842 old_active->dev->addr_len);
843 ss.ss_family = new_active->dev->type;
844 } else {
845 bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
846 bond->dev->addr_len);
847 ss.ss_family = bond->dev->type;
848 }
849
850 rv = dev_set_mac_address(new_active->dev,
851 (struct sockaddr *)&ss, NULL);
852 if (rv) {
853 slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
854 -rv);
855 goto out;
856 }
857
858 if (!old_active)
859 goto out;
860
861 bond_hw_addr_copy(ss.__data, tmp_mac,
862 new_active->dev->addr_len);
863 ss.ss_family = old_active->dev->type;
864
865 rv = dev_set_mac_address(old_active->dev,
866 (struct sockaddr *)&ss, NULL);
867 if (rv)
868 slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
869 -rv);
870out:
871 break;
872 default:
873 netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
874 bond->params.fail_over_mac);
875 break;
876 }
877
878}
879
880static struct slave *bond_choose_primary_or_current(struct bonding *bond)
881{
882 struct slave *prim = rtnl_dereference(bond->primary_slave);
883 struct slave *curr = rtnl_dereference(bond->curr_active_slave);
884
885 if (!prim || prim->link != BOND_LINK_UP) {
886 if (!curr || curr->link != BOND_LINK_UP)
887 return NULL;
888 return curr;
889 }
890
891 if (bond->force_primary) {
892 bond->force_primary = false;
893 return prim;
894 }
895
896 if (!curr || curr->link != BOND_LINK_UP)
897 return prim;
898
899 /* At this point, prim and curr are both up */
900 switch (bond->params.primary_reselect) {
901 case BOND_PRI_RESELECT_ALWAYS:
902 return prim;
903 case BOND_PRI_RESELECT_BETTER:
904 if (prim->speed < curr->speed)
905 return curr;
906 if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
907 return curr;
908 return prim;
909 case BOND_PRI_RESELECT_FAILURE:
910 return curr;
911 default:
912 netdev_err(bond->dev, "impossible primary_reselect %d\n",
913 bond->params.primary_reselect);
914 return curr;
915 }
916}
917
918/**
919 * bond_find_best_slave - select the best available slave to be the active one
920 * @bond: our bonding struct
921 */
922static struct slave *bond_find_best_slave(struct bonding *bond)
923{
924 struct slave *slave, *bestslave = NULL;
925 struct list_head *iter;
926 int mintime = bond->params.updelay;
927
928 slave = bond_choose_primary_or_current(bond);
929 if (slave)
930 return slave;
931
932 bond_for_each_slave(bond, slave, iter) {
933 if (slave->link == BOND_LINK_UP)
934 return slave;
935 if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
936 slave->delay < mintime) {
937 mintime = slave->delay;
938 bestslave = slave;
939 }
940 }
941
942 return bestslave;
943}
944
945static bool bond_should_notify_peers(struct bonding *bond)
946{
947 struct slave *slave;
948
949 rcu_read_lock();
950 slave = rcu_dereference(bond->curr_active_slave);
951 rcu_read_unlock();
952
953 netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
954 slave ? slave->dev->name : "NULL");
955
956 if (!slave || !bond->send_peer_notif ||
957 bond->send_peer_notif %
958 max(1, bond->params.peer_notif_delay) != 0 ||
959 !netif_carrier_ok(bond->dev) ||
960 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
961 return false;
962
963 return true;
964}
965
966/**
967 * bond_change_active_slave - change the active slave into the specified one
968 * @bond: our bonding struct
969 * @new_active: the new slave to make the active one
970 *
971 * Set the new slave to the bond's settings and unset them on the old
972 * curr_active_slave.
973 * Settings include flags, mc-list, promiscuity, allmulti, etc.
974 *
975 * If @new's link state is %BOND_LINK_BACK we'll set it to %BOND_LINK_UP,
976 * because it is apparently the best available slave we have, even though its
977 * updelay hasn't timed out yet.
978 *
979 * Caller must hold RTNL.
980 */
981void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
982{
983 struct slave *old_active;
984
985 ASSERT_RTNL();
986
987 old_active = rtnl_dereference(bond->curr_active_slave);
988
989 if (old_active == new_active)
990 return;
991
992#ifdef CONFIG_XFRM_OFFLOAD
993 if (old_active && bond->xs)
994 bond_ipsec_del_sa(bond->xs);
995#endif /* CONFIG_XFRM_OFFLOAD */
996
997 if (new_active) {
998 new_active->last_link_up = jiffies;
999
1000 if (new_active->link == BOND_LINK_BACK) {
1001 if (bond_uses_primary(bond)) {
1002 slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
1003 (bond->params.updelay - new_active->delay) * bond->params.miimon);
1004 }
1005
1006 new_active->delay = 0;
1007 bond_set_slave_link_state(new_active, BOND_LINK_UP,
1008 BOND_SLAVE_NOTIFY_NOW);
1009
1010 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1011 bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
1012
1013 if (bond_is_lb(bond))
1014 bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
1015 } else {
1016 if (bond_uses_primary(bond)) {
1017 slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
1018 }
1019 }
1020 }
1021
1022 if (bond_uses_primary(bond))
1023 bond_hw_addr_swap(bond, new_active, old_active);
1024
1025 if (bond_is_lb(bond)) {
1026 bond_alb_handle_active_change(bond, new_active);
1027 if (old_active)
1028 bond_set_slave_inactive_flags(old_active,
1029 BOND_SLAVE_NOTIFY_NOW);
1030 if (new_active)
1031 bond_set_slave_active_flags(new_active,
1032 BOND_SLAVE_NOTIFY_NOW);
1033 } else {
1034 rcu_assign_pointer(bond->curr_active_slave, new_active);
1035 }
1036
1037 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
1038 if (old_active)
1039 bond_set_slave_inactive_flags(old_active,
1040 BOND_SLAVE_NOTIFY_NOW);
1041
1042 if (new_active) {
1043 bool should_notify_peers = false;
1044
1045 bond_set_slave_active_flags(new_active,
1046 BOND_SLAVE_NOTIFY_NOW);
1047
1048 if (bond->params.fail_over_mac)
1049 bond_do_fail_over_mac(bond, new_active,
1050 old_active);
1051
1052 if (netif_running(bond->dev)) {
1053 bond->send_peer_notif =
1054 bond->params.num_peer_notif *
1055 max(1, bond->params.peer_notif_delay);
1056 should_notify_peers =
1057 bond_should_notify_peers(bond);
1058 }
1059
1060 call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
1061 if (should_notify_peers) {
1062 bond->send_peer_notif--;
1063 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
1064 bond->dev);
1065 }
1066 }
1067 }
1068
1069#ifdef CONFIG_XFRM_OFFLOAD
1070 if (new_active && bond->xs) {
1071 xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true);
1072 bond_ipsec_add_sa(bond->xs);
1073 }
1074#endif /* CONFIG_XFRM_OFFLOAD */
1075
1076 /* resend IGMP joins since active slave has changed or
1077 * all were sent on curr_active_slave.
1078 * resend only if bond is brought up with the affected
1079 * bonding modes and the retransmission is enabled
1080 */
1081 if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
1082 ((bond_uses_primary(bond) && new_active) ||
1083 BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
1084 bond->igmp_retrans = bond->params.resend_igmp;
1085 queue_delayed_work(bond->wq, &bond->mcast_work, 1);
1086 }
1087}
1088
1089/**
1090 * bond_select_active_slave - select a new active slave, if needed
1091 * @bond: our bonding struct
1092 *
1093 * This function should be called when one of the following occurs:
1094 * - The old curr_active_slave has been released or lost its link.
1095 * - The primary_slave has got its link back.
1096 * - A slave has got its link back and there's no old curr_active_slave.
1097 *
1098 * Caller must hold RTNL.
1099 */
1100void bond_select_active_slave(struct bonding *bond)
1101{
1102 struct slave *best_slave;
1103 int rv;
1104
1105 ASSERT_RTNL();
1106
1107 best_slave = bond_find_best_slave(bond);
1108 if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
1109 bond_change_active_slave(bond, best_slave);
1110 rv = bond_set_carrier(bond);
1111 if (!rv)
1112 return;
1113
1114 if (netif_carrier_ok(bond->dev))
1115 netdev_info(bond->dev, "active interface up!\n");
1116 else
1117 netdev_info(bond->dev, "now running without any active interface!\n");
1118 }
1119}
1120
1121#ifdef CONFIG_NET_POLL_CONTROLLER
1122static inline int slave_enable_netpoll(struct slave *slave)
1123{
1124 struct netpoll *np;
1125 int err = 0;
1126
1127 np = kzalloc(sizeof(*np), GFP_KERNEL);
1128 err = -ENOMEM;
1129 if (!np)
1130 goto out;
1131
1132 err = __netpoll_setup(np, slave->dev);
1133 if (err) {
1134 kfree(np);
1135 goto out;
1136 }
1137 slave->np = np;
1138out:
1139 return err;
1140}
1141static inline void slave_disable_netpoll(struct slave *slave)
1142{
1143 struct netpoll *np = slave->np;
1144
1145 if (!np)
1146 return;
1147
1148 slave->np = NULL;
1149
1150 __netpoll_free(np);
1151}
1152
1153static void bond_poll_controller(struct net_device *bond_dev)
1154{
1155 struct bonding *bond = netdev_priv(bond_dev);
1156 struct slave *slave = NULL;
1157 struct list_head *iter;
1158 struct ad_info ad_info;
1159
1160 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1161 if (bond_3ad_get_active_agg_info(bond, &ad_info))
1162 return;
1163
1164 bond_for_each_slave_rcu(bond, slave, iter) {
1165 if (!bond_slave_is_up(slave))
1166 continue;
1167
1168 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1169 struct aggregator *agg =
1170 SLAVE_AD_INFO(slave)->port.aggregator;
1171
1172 if (agg &&
1173 agg->aggregator_identifier != ad_info.aggregator_id)
1174 continue;
1175 }
1176
1177 netpoll_poll_dev(slave->dev);
1178 }
1179}
1180
1181static void bond_netpoll_cleanup(struct net_device *bond_dev)
1182{
1183 struct bonding *bond = netdev_priv(bond_dev);
1184 struct list_head *iter;
1185 struct slave *slave;
1186
1187 bond_for_each_slave(bond, slave, iter)
1188 if (bond_slave_is_up(slave))
1189 slave_disable_netpoll(slave);
1190}
1191
1192static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
1193{
1194 struct bonding *bond = netdev_priv(dev);
1195 struct list_head *iter;
1196 struct slave *slave;
1197 int err = 0;
1198
1199 bond_for_each_slave(bond, slave, iter) {
1200 err = slave_enable_netpoll(slave);
1201 if (err) {
1202 bond_netpoll_cleanup(dev);
1203 break;
1204 }
1205 }
1206 return err;
1207}
1208#else
1209static inline int slave_enable_netpoll(struct slave *slave)
1210{
1211 return 0;
1212}
1213static inline void slave_disable_netpoll(struct slave *slave)
1214{
1215}
1216static void bond_netpoll_cleanup(struct net_device *bond_dev)
1217{
1218}
1219#endif
1220
1221/*---------------------------------- IOCTL ----------------------------------*/
1222
1223static netdev_features_t bond_fix_features(struct net_device *dev,
1224 netdev_features_t features)
1225{
1226 struct bonding *bond = netdev_priv(dev);
1227 struct list_head *iter;
1228 netdev_features_t mask;
1229 struct slave *slave;
1230
1231#if IS_ENABLED(CONFIG_TLS_DEVICE)
1232 if (bond_sk_check(bond))
1233 features |= BOND_TLS_FEATURES;
1234 else
1235 features &= ~BOND_TLS_FEATURES;
1236#endif
1237
1238 mask = features;
1239
1240 features &= ~NETIF_F_ONE_FOR_ALL;
1241 features |= NETIF_F_ALL_FOR_ALL;
1242
1243 bond_for_each_slave(bond, slave, iter) {
1244 features = netdev_increment_features(features,
1245 slave->dev->features,
1246 mask);
1247 }
1248 features = netdev_add_tso_features(features, mask);
1249
1250 return features;
1251}
1252
1253#define BOND_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
1254 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
1255 NETIF_F_HIGHDMA | NETIF_F_LRO)
1256
1257#define BOND_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
1258 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
1259
1260#define BOND_MPLS_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
1261 NETIF_F_GSO_SOFTWARE)
1262
1263
1264static void bond_compute_features(struct bonding *bond)
1265{
1266 unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
1267 IFF_XMIT_DST_RELEASE_PERM;
1268 netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1269 netdev_features_t enc_features = BOND_ENC_FEATURES;
1270#ifdef CONFIG_XFRM_OFFLOAD
1271 netdev_features_t xfrm_features = BOND_XFRM_FEATURES;
1272#endif /* CONFIG_XFRM_OFFLOAD */
1273 netdev_features_t mpls_features = BOND_MPLS_FEATURES;
1274 struct net_device *bond_dev = bond->dev;
1275 struct list_head *iter;
1276 struct slave *slave;
1277 unsigned short max_hard_header_len = ETH_HLEN;
1278 unsigned int gso_max_size = GSO_MAX_SIZE;
1279 u16 gso_max_segs = GSO_MAX_SEGS;
1280
1281 if (!bond_has_slaves(bond))
1282 goto done;
1283 vlan_features &= NETIF_F_ALL_FOR_ALL;
1284 mpls_features &= NETIF_F_ALL_FOR_ALL;
1285
1286 bond_for_each_slave(bond, slave, iter) {
1287 vlan_features = netdev_increment_features(vlan_features,
1288 slave->dev->vlan_features, BOND_VLAN_FEATURES);
1289
1290 enc_features = netdev_increment_features(enc_features,
1291 slave->dev->hw_enc_features,
1292 BOND_ENC_FEATURES);
1293
1294#ifdef CONFIG_XFRM_OFFLOAD
1295 xfrm_features = netdev_increment_features(xfrm_features,
1296 slave->dev->hw_enc_features,
1297 BOND_XFRM_FEATURES);
1298#endif /* CONFIG_XFRM_OFFLOAD */
1299
1300 mpls_features = netdev_increment_features(mpls_features,
1301 slave->dev->mpls_features,
1302 BOND_MPLS_FEATURES);
1303
1304 dst_release_flag &= slave->dev->priv_flags;
1305 if (slave->dev->hard_header_len > max_hard_header_len)
1306 max_hard_header_len = slave->dev->hard_header_len;
1307
1308 gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
1309 gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
1310 }
1311 bond_dev->hard_header_len = max_hard_header_len;
1312
1313done:
1314 bond_dev->vlan_features = vlan_features;
1315 bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
1316 NETIF_F_HW_VLAN_CTAG_TX |
1317 NETIF_F_HW_VLAN_STAG_TX;
1318#ifdef CONFIG_XFRM_OFFLOAD
1319 bond_dev->hw_enc_features |= xfrm_features;
1320#endif /* CONFIG_XFRM_OFFLOAD */
1321 bond_dev->mpls_features = mpls_features;
1322 bond_dev->gso_max_segs = gso_max_segs;
1323 netif_set_gso_max_size(bond_dev, gso_max_size);
1324
1325 bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1326 if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
1327 dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1328 bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1329
1330 netdev_change_features(bond_dev);
1331}
1332
1333static void bond_setup_by_slave(struct net_device *bond_dev,
1334 struct net_device *slave_dev)
1335{
1336 bond_dev->header_ops = slave_dev->header_ops;
1337
1338 bond_dev->type = slave_dev->type;
1339 bond_dev->hard_header_len = slave_dev->hard_header_len;
1340 bond_dev->needed_headroom = slave_dev->needed_headroom;
1341 bond_dev->addr_len = slave_dev->addr_len;
1342
1343 memcpy(bond_dev->broadcast, slave_dev->broadcast,
1344 slave_dev->addr_len);
1345}
1346
1347/* On bonding slaves other than the currently active slave, suppress
1348 * duplicates except for alb non-mcast/bcast.
1349 */
1350static bool bond_should_deliver_exact_match(struct sk_buff *skb,
1351 struct slave *slave,
1352 struct bonding *bond)
1353{
1354 if (bond_is_slave_inactive(slave)) {
1355 if (BOND_MODE(bond) == BOND_MODE_ALB &&
1356 skb->pkt_type != PACKET_BROADCAST &&
1357 skb->pkt_type != PACKET_MULTICAST)
1358 return false;
1359 return true;
1360 }
1361 return false;
1362}
1363
1364static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
1365{
1366 struct sk_buff *skb = *pskb;
1367 struct slave *slave;
1368 struct bonding *bond;
1369 int (*recv_probe)(const struct sk_buff *, struct bonding *,
1370 struct slave *);
1371 int ret = RX_HANDLER_ANOTHER;
1372
1373 skb = skb_share_check(skb, GFP_ATOMIC);
1374 if (unlikely(!skb))
1375 return RX_HANDLER_CONSUMED;
1376
1377 *pskb = skb;
1378
1379 slave = bond_slave_get_rcu(skb->dev);
1380 bond = slave->bond;
1381
1382 recv_probe = READ_ONCE(bond->recv_probe);
1383 if (recv_probe) {
1384 ret = recv_probe(skb, bond, slave);
1385 if (ret == RX_HANDLER_CONSUMED) {
1386 consume_skb(skb);
1387 return ret;
1388 }
1389 }
1390
1391 /*
1392 * For packets determined by bond_should_deliver_exact_match() call to
1393 * be suppressed we want to make an exception for link-local packets.
1394 * This is necessary for e.g. LLDP daemons to be able to monitor
1395 * inactive slave links without being forced to bind to them
1396 * explicitly.
1397 *
1398 * At the same time, packets that are passed to the bonding master
1399 * (including link-local ones) can have their originating interface
1400 * determined via PACKET_ORIGDEV socket option.
1401 */
1402 if (bond_should_deliver_exact_match(skb, slave, bond)) {
1403 if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
1404 return RX_HANDLER_PASS;
1405 return RX_HANDLER_EXACT;
1406 }
1407
1408 skb->dev = bond->dev;
1409
1410 if (BOND_MODE(bond) == BOND_MODE_ALB &&
1411 netif_is_bridge_port(bond->dev) &&
1412 skb->pkt_type == PACKET_HOST) {
1413
1414 if (unlikely(skb_cow_head(skb,
1415 skb->data - skb_mac_header(skb)))) {
1416 kfree_skb(skb);
1417 return RX_HANDLER_CONSUMED;
1418 }
1419 bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
1420 bond->dev->addr_len);
1421 }
1422
1423 return ret;
1424}
1425
1426static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
1427{
1428 switch (BOND_MODE(bond)) {
1429 case BOND_MODE_ROUNDROBIN:
1430 return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
1431 case BOND_MODE_ACTIVEBACKUP:
1432 return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
1433 case BOND_MODE_BROADCAST:
1434 return NETDEV_LAG_TX_TYPE_BROADCAST;
1435 case BOND_MODE_XOR:
1436 case BOND_MODE_8023AD:
1437 return NETDEV_LAG_TX_TYPE_HASH;
1438 default:
1439 return NETDEV_LAG_TX_TYPE_UNKNOWN;
1440 }
1441}
1442
1443static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
1444 enum netdev_lag_tx_type type)
1445{
1446 if (type != NETDEV_LAG_TX_TYPE_HASH)
1447 return NETDEV_LAG_HASH_NONE;
1448
1449 switch (bond->params.xmit_policy) {
1450 case BOND_XMIT_POLICY_LAYER2:
1451 return NETDEV_LAG_HASH_L2;
1452 case BOND_XMIT_POLICY_LAYER34:
1453 return NETDEV_LAG_HASH_L34;
1454 case BOND_XMIT_POLICY_LAYER23:
1455 return NETDEV_LAG_HASH_L23;
1456 case BOND_XMIT_POLICY_ENCAP23:
1457 return NETDEV_LAG_HASH_E23;
1458 case BOND_XMIT_POLICY_ENCAP34:
1459 return NETDEV_LAG_HASH_E34;
1460 case BOND_XMIT_POLICY_VLAN_SRCMAC:
1461 return NETDEV_LAG_HASH_VLAN_SRCMAC;
1462 default:
1463 return NETDEV_LAG_HASH_UNKNOWN;
1464 }
1465}
1466
1467static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
1468 struct netlink_ext_ack *extack)
1469{
1470 struct netdev_lag_upper_info lag_upper_info;
1471 enum netdev_lag_tx_type type;
1472
1473 type = bond_lag_tx_type(bond);
1474 lag_upper_info.tx_type = type;
1475 lag_upper_info.hash_type = bond_lag_hash_type(bond, type);
1476
1477 return netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
1478 &lag_upper_info, extack);
1479}
1480
1481static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
1482{
1483 netdev_upper_dev_unlink(slave->dev, bond->dev);
1484 slave->dev->flags &= ~IFF_SLAVE;
1485}
1486
1487static void slave_kobj_release(struct kobject *kobj)
1488{
1489 struct slave *slave = to_slave(kobj);
1490 struct bonding *bond = bond_get_bond_by_slave(slave);
1491
1492 cancel_delayed_work_sync(&slave->notify_work);
1493 if (BOND_MODE(bond) == BOND_MODE_8023AD)
1494 kfree(SLAVE_AD_INFO(slave));
1495
1496 kfree(slave);
1497}
1498
1499static struct kobj_type slave_ktype = {
1500 .release = slave_kobj_release,
1501#ifdef CONFIG_SYSFS
1502 .sysfs_ops = &slave_sysfs_ops,
1503#endif
1504};
1505
1506static int bond_kobj_init(struct slave *slave)
1507{
1508 int err;
1509
1510 err = kobject_init_and_add(&slave->kobj, &slave_ktype,
1511 &(slave->dev->dev.kobj), "bonding_slave");
1512 if (err)
1513 kobject_put(&slave->kobj);
1514
1515 return err;
1516}
1517
1518static struct slave *bond_alloc_slave(struct bonding *bond,
1519 struct net_device *slave_dev)
1520{
1521 struct slave *slave = NULL;
1522
1523 slave = kzalloc(sizeof(*slave), GFP_KERNEL);
1524 if (!slave)
1525 return NULL;
1526
1527 slave->bond = bond;
1528 slave->dev = slave_dev;
1529 INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
1530
1531 if (bond_kobj_init(slave))
1532 return NULL;
1533
1534 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1535 SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
1536 GFP_KERNEL);
1537 if (!SLAVE_AD_INFO(slave)) {
1538 kobject_put(&slave->kobj);
1539 return NULL;
1540 }
1541 }
1542
1543 return slave;
1544}
1545
1546static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
1547{
1548 info->bond_mode = BOND_MODE(bond);
1549 info->miimon = bond->params.miimon;
1550 info->num_slaves = bond->slave_cnt;
1551}
1552
1553static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
1554{
1555 strcpy(info->slave_name, slave->dev->name);
1556 info->link = slave->link;
1557 info->state = bond_slave_state(slave);
1558 info->link_failure_count = slave->link_failure_count;
1559}
1560
1561static void bond_netdev_notify_work(struct work_struct *_work)
1562{
1563 struct slave *slave = container_of(_work, struct slave,
1564 notify_work.work);
1565
1566 if (rtnl_trylock()) {
1567 struct netdev_bonding_info binfo;
1568
1569 bond_fill_ifslave(slave, &binfo.slave);
1570 bond_fill_ifbond(slave->bond, &binfo.master);
1571 netdev_bonding_info_change(slave->dev, &binfo);
1572 rtnl_unlock();
1573 } else {
1574 queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
1575 }
1576}
1577
1578void bond_queue_slave_event(struct slave *slave)
1579{
1580 queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
1581}
1582
1583void bond_lower_state_changed(struct slave *slave)
1584{
1585 struct netdev_lag_lower_state_info info;
1586
1587 info.link_up = slave->link == BOND_LINK_UP ||
1588 slave->link == BOND_LINK_FAIL;
1589 info.tx_enabled = bond_is_active_slave(slave);
1590 netdev_lower_state_changed(slave->dev, &info);
1591}
1592
1593/* enslave device <slave> to bond device <master> */
1594int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
1595 struct netlink_ext_ack *extack)
1596{
1597 struct bonding *bond = netdev_priv(bond_dev);
1598 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1599 struct slave *new_slave = NULL, *prev_slave;
1600 struct sockaddr_storage ss;
1601 int link_reporting;
1602 int res = 0, i;
1603
1604 if (!bond->params.use_carrier &&
1605 slave_dev->ethtool_ops->get_link == NULL &&
1606 slave_ops->ndo_do_ioctl == NULL) {
1607 slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
1608 }
1609
1610 /* already in-use? */
1611 if (netdev_is_rx_handler_busy(slave_dev)) {
1612 NL_SET_ERR_MSG(extack, "Device is in use and cannot be enslaved");
1613 slave_err(bond_dev, slave_dev,
1614 "Error: Device is in use and cannot be enslaved\n");
1615 return -EBUSY;
1616 }
1617
1618 if (bond_dev == slave_dev) {
1619 NL_SET_ERR_MSG(extack, "Cannot enslave bond to itself.");
1620 netdev_err(bond_dev, "cannot enslave bond to itself.\n");
1621 return -EPERM;
1622 }
1623
1624 /* vlan challenged mutual exclusion */
1625 /* no need to lock since we're protected by rtnl_lock */
1626 if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
1627 slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
1628 if (vlan_uses_dev(bond_dev)) {
1629 NL_SET_ERR_MSG(extack, "Can not enslave VLAN challenged device to VLAN enabled bond");
1630 slave_err(bond_dev, slave_dev, "Error: cannot enslave VLAN challenged slave on VLAN enabled bond\n");
1631 return -EPERM;
1632 } else {
1633 slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
1634 }
1635 } else {
1636 slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
1637 }
1638
1639 if (slave_dev->features & NETIF_F_HW_ESP)
1640 slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");
1641
1642 /* Old ifenslave binaries are no longer supported. These can
1643 * be identified with moderate accuracy by the state of the slave:
1644 * the current ifenslave will set the interface down prior to
1645 * enslaving it; the old ifenslave will not.
1646 */
1647 if (slave_dev->flags & IFF_UP) {
1648 NL_SET_ERR_MSG(extack, "Device can not be enslaved while up");
1649 slave_err(bond_dev, slave_dev, "slave is up - this may be due to an out of date ifenslave\n");
1650 return -EPERM;
1651 }
1652
1653 /* set bonding device ether type by slave - bonding netdevices are
1654 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
1655 * there is a need to override some of the type dependent attribs/funcs.
1656 *
1657 * bond ether type mutual exclusion - don't allow slaves of dissimilar
1658 * ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the same bond
1659 */
1660 if (!bond_has_slaves(bond)) {
1661 if (bond_dev->type != slave_dev->type) {
1662 slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
1663 bond_dev->type, slave_dev->type);
1664
1665 res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
1666 bond_dev);
1667 res = notifier_to_errno(res);
1668 if (res) {
1669 slave_err(bond_dev, slave_dev, "refused to change device type\n");
1670 return -EBUSY;
1671 }
1672
1673 /* Flush unicast and multicast addresses */
1674 dev_uc_flush(bond_dev);
1675 dev_mc_flush(bond_dev);
1676
1677 if (slave_dev->type != ARPHRD_ETHER)
1678 bond_setup_by_slave(bond_dev, slave_dev);
1679 else {
1680 ether_setup(bond_dev);
1681 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1682 }
1683
1684 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
1685 bond_dev);
1686 }
1687 } else if (bond_dev->type != slave_dev->type) {
1688 NL_SET_ERR_MSG(extack, "Device type is different from other slaves");
1689 slave_err(bond_dev, slave_dev, "ether type (%d) is different from other slaves (%d), can not enslave it\n",
1690 slave_dev->type, bond_dev->type);
1691 return -EINVAL;
1692 }
1693
1694 if (slave_dev->type == ARPHRD_INFINIBAND &&
1695 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1696 NL_SET_ERR_MSG(extack, "Only active-backup mode is supported for infiniband slaves");
1697 slave_warn(bond_dev, slave_dev, "Type (%d) supports only active-backup mode\n",
1698 slave_dev->type);
1699 res = -EOPNOTSUPP;
1700 goto err_undo_flags;
1701 }
1702
1703 if (!slave_ops->ndo_set_mac_address ||
1704 slave_dev->type == ARPHRD_INFINIBAND) {
1705 slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
1706 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
1707 bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
1708 if (!bond_has_slaves(bond)) {
1709 bond->params.fail_over_mac = BOND_FOM_ACTIVE;
1710 slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
1711 } else {
1712 NL_SET_ERR_MSG(extack, "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
1713 slave_err(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
1714 res = -EOPNOTSUPP;
1715 goto err_undo_flags;
1716 }
1717 }
1718 }
1719
1720 call_netdevice_notifiers(NETDEV_JOIN, slave_dev);
1721
1722 /* If this is the first slave, then we need to set the master's hardware
1723 * address to be the same as the slave's.
1724 */
1725 if (!bond_has_slaves(bond) &&
1726 bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
1727 res = bond_set_dev_addr(bond->dev, slave_dev);
1728 if (res)
1729 goto err_undo_flags;
1730 }
1731
1732 new_slave = bond_alloc_slave(bond, slave_dev);
1733 if (!new_slave) {
1734 res = -ENOMEM;
1735 goto err_undo_flags;
1736 }
1737
1738 /* Set the new_slave's queue_id to be zero. Queue ID mapping
1739 * is set via sysfs or module option if desired.
1740 */
1741 new_slave->queue_id = 0;
1742
1743 /* Save slave's original mtu and then set it to match the bond */
1744 new_slave->original_mtu = slave_dev->mtu;
1745 res = dev_set_mtu(slave_dev, bond->dev->mtu);
1746 if (res) {
1747 slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
1748 goto err_free;
1749 }
1750
1751 /* Save slave's original ("permanent") mac address for modes
1752 * that need it, and for restoring it upon release, and then
1753 * set it to the master's address
1754 */
1755 bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
1756 slave_dev->addr_len);
1757
1758 if (!bond->params.fail_over_mac ||
1759 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1760 /* Set slave to master's mac address. The application already
1761 * set the master's mac address to that of the first slave
1762 */
1763 memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
1764 ss.ss_family = slave_dev->type;
1765 res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
1766 extack);
1767 if (res) {
1768 slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
1769 goto err_restore_mtu;
1770 }
1771 }
1772
1773 /* set slave flag before open to prevent IPv6 addrconf */
1774 slave_dev->flags |= IFF_SLAVE;
1775
1776 /* open the slave since the application closed it */
1777 res = dev_open(slave_dev, extack);
1778 if (res) {
1779 slave_err(bond_dev, slave_dev, "Opening slave failed\n");
1780 goto err_restore_mac;
1781 }
1782
1783 slave_dev->priv_flags |= IFF_BONDING;
1784 /* initialize slave stats */
1785 dev_get_stats(new_slave->dev, &new_slave->slave_stats);
1786
1787 if (bond_is_lb(bond)) {
1788 /* bond_alb_init_slave() must be called before all other stages since
1789 * it might fail and we do not want to have to undo everything
1790 */
1791 res = bond_alb_init_slave(bond, new_slave);
1792 if (res)
1793 goto err_close;
1794 }
1795
1796 res = vlan_vids_add_by_dev(slave_dev, bond_dev);
1797 if (res) {
1798 slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
1799 goto err_close;
1800 }
1801
1802 prev_slave = bond_last_slave(bond);
1803
1804 new_slave->delay = 0;
1805 new_slave->link_failure_count = 0;
1806
1807 if (bond_update_speed_duplex(new_slave) &&
1808 bond_needs_speed_duplex(bond))
1809 new_slave->link = BOND_LINK_DOWN;
1810
1811 new_slave->last_rx = jiffies -
1812 (msecs_to_jiffies(bond->params.arp_interval) + 1);
1813 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
1814 new_slave->target_last_arp_rx[i] = new_slave->last_rx;
1815
1816 if (bond->params.miimon && !bond->params.use_carrier) {
1817 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
1818
1819 if ((link_reporting == -1) && !bond->params.arp_interval) {
1820 /* miimon is set but a bonded network driver
1821 * does not support ETHTOOL/MII and
1822 * arp_interval is not set. Note: if
1823 * use_carrier is enabled, we will never go
1824 * here (because netif_carrier is always
1825 * supported); thus, we don't need to change
1826 * the messages for netif_carrier.
1827 */
1828 slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
1829 } else if (link_reporting == -1) {
1830 /* unable to get link status using mii/ethtool */
1831 slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
1832 }
1833 }
1834
1835 /* check for initial state */
1836 new_slave->link = BOND_LINK_NOCHANGE;
1837 if (bond->params.miimon) {
1838 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1839 if (bond->params.updelay) {
1840 bond_set_slave_link_state(new_slave,
1841 BOND_LINK_BACK,
1842 BOND_SLAVE_NOTIFY_NOW);
1843 new_slave->delay = bond->params.updelay;
1844 } else {
1845 bond_set_slave_link_state(new_slave,
1846 BOND_LINK_UP,
1847 BOND_SLAVE_NOTIFY_NOW);
1848 }
1849 } else {
1850 bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
1851 BOND_SLAVE_NOTIFY_NOW);
1852 }
1853 } else if (bond->params.arp_interval) {
1854 bond_set_slave_link_state(new_slave,
1855 (netif_carrier_ok(slave_dev) ?
1856 BOND_LINK_UP : BOND_LINK_DOWN),
1857 BOND_SLAVE_NOTIFY_NOW);
1858 } else {
1859 bond_set_slave_link_state(new_slave, BOND_LINK_UP,
1860 BOND_SLAVE_NOTIFY_NOW);
1861 }
1862
1863 if (new_slave->link != BOND_LINK_DOWN)
1864 new_slave->last_link_up = jiffies;
1865 slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
1866 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1867 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1868
1869 if (bond_uses_primary(bond) && bond->params.primary[0]) {
1870 /* if there is a primary slave, remember it */
1871 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
1872 rcu_assign_pointer(bond->primary_slave, new_slave);
1873 bond->force_primary = true;
1874 }
1875 }
1876
1877 switch (BOND_MODE(bond)) {
1878 case BOND_MODE_ACTIVEBACKUP:
1879 bond_set_slave_inactive_flags(new_slave,
1880 BOND_SLAVE_NOTIFY_NOW);
1881 break;
1882 case BOND_MODE_8023AD:
1883 /* in 802.3ad mode, the internal mechanism
1884 * will activate the slaves in the selected
1885 * aggregator
1886 */
1887 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
1888 /* if this is the first slave */
1889 if (!prev_slave) {
1890 SLAVE_AD_INFO(new_slave)->id = 1;
1891 /* Initialize AD with the number of times the AD timer is called in 1 second;
1892 * this can be done only after the mac address of the bond is set
1893 */
1894 bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
1895 } else {
1896 SLAVE_AD_INFO(new_slave)->id =
1897 SLAVE_AD_INFO(prev_slave)->id + 1;
1898 }
1899
1900 bond_3ad_bind_slave(new_slave);
1901 break;
1902 case BOND_MODE_TLB:
1903 case BOND_MODE_ALB:
1904 bond_set_active_slave(new_slave);
1905 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
1906 break;
1907 default:
1908 slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
1909
1910 /* always active in trunk mode */
1911 bond_set_active_slave(new_slave);
1912
1913 /* In trunking mode there is little meaning to curr_active_slave
1914 * anyway (it holds no special properties of the bond device),
1915 * so we can change it without calling change_active_interface()
1916 */
1917 if (!rcu_access_pointer(bond->curr_active_slave) &&
1918 new_slave->link == BOND_LINK_UP)
1919 rcu_assign_pointer(bond->curr_active_slave, new_slave);
1920
1921 break;
1922 } /* switch(bond_mode) */
1923
1924#ifdef CONFIG_NET_POLL_CONTROLLER
1925 if (bond->dev->npinfo) {
1926 if (slave_enable_netpoll(new_slave)) {
1927 slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
1928 res = -EBUSY;
1929 goto err_detach;
1930 }
1931 }
1932#endif
1933
1934 if (!(bond_dev->features & NETIF_F_LRO))
1935 dev_disable_lro(slave_dev);
1936
1937 res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
1938 new_slave);
1939 if (res) {
1940 slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
1941 goto err_detach;
1942 }
1943
1944 res = bond_master_upper_dev_link(bond, new_slave, extack);
1945 if (res) {
1946 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
1947 goto err_unregister;
1948 }
1949
1950 bond_lower_state_changed(new_slave);
1951
1952 res = bond_sysfs_slave_add(new_slave);
1953 if (res) {
1954 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
1955 goto err_upper_unlink;
1956 }
1957
1958 /* If the mode uses primary, then the following is handled by
1959 * bond_change_active_slave().
1960 */
1961 if (!bond_uses_primary(bond)) {
1962 /* set promiscuity level to new slave */
1963 if (bond_dev->flags & IFF_PROMISC) {
1964 res = dev_set_promiscuity(slave_dev, 1);
1965 if (res)
1966 goto err_sysfs_del;
1967 }
1968
1969 /* set allmulti level to new slave */
1970 if (bond_dev->flags & IFF_ALLMULTI) {
1971 res = dev_set_allmulti(slave_dev, 1);
1972 if (res) {
1973 if (bond_dev->flags & IFF_PROMISC)
1974 dev_set_promiscuity(slave_dev, -1);
1975 goto err_sysfs_del;
1976 }
1977 }
1978
1979 netif_addr_lock_bh(bond_dev);
1980 dev_mc_sync_multiple(slave_dev, bond_dev);
1981 dev_uc_sync_multiple(slave_dev, bond_dev);
1982 netif_addr_unlock_bh(bond_dev);
1983
1984 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
1985 /* add lacpdu mc addr to mc list */
1986 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1987
1988 dev_mc_add(slave_dev, lacpdu_multicast);
1989 }
1990 }
1991
1992 bond->slave_cnt++;
1993 bond_compute_features(bond);
1994 bond_set_carrier(bond);
1995
1996 if (bond_uses_primary(bond)) {
1997 block_netpoll_tx();
1998 bond_select_active_slave(bond);
1999 unblock_netpoll_tx();
2000 }
2001
2002 if (bond_mode_can_use_xmit_hash(bond))
2003 bond_update_slave_arr(bond, NULL);
2004
2005
2006 slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
2007 bond_is_active_slave(new_slave) ? "an active" : "a backup",
2008 new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
2009
2010 /* enslave is successful */
2011 bond_queue_slave_event(new_slave);
2012 return 0;
2013
2014/* Undo stages on error */
2015err_sysfs_del:
2016 bond_sysfs_slave_del(new_slave);
2017
2018err_upper_unlink:
2019 bond_upper_dev_unlink(bond, new_slave);
2020
2021err_unregister:
2022 netdev_rx_handler_unregister(slave_dev);
2023
2024err_detach:
2025 vlan_vids_del_by_dev(slave_dev, bond_dev);
2026 if (rcu_access_pointer(bond->primary_slave) == new_slave)
2027 RCU_INIT_POINTER(bond->primary_slave, NULL);
2028 if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
2029 block_netpoll_tx();
2030 bond_change_active_slave(bond, NULL);
2031 bond_select_active_slave(bond);
2032 unblock_netpoll_tx();
2033 }
2034 /* either primary_slave or curr_active_slave might've changed */
2035 synchronize_rcu();
2036 slave_disable_netpoll(new_slave);
2037
2038err_close:
2039 if (!netif_is_bond_master(slave_dev))
2040 slave_dev->priv_flags &= ~IFF_BONDING;
2041 dev_close(slave_dev);
2042
2043err_restore_mac:
2044 slave_dev->flags &= ~IFF_SLAVE;
2045 if (!bond->params.fail_over_mac ||
2046 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2047 /* XXX TODO - fom follow mode needs to change master's
2048 * MAC if this slave's MAC is in use by the bond, or at
2049 * least print a warning.
2050 */
2051 bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
2052 new_slave->dev->addr_len);
2053 ss.ss_family = slave_dev->type;
2054 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2055 }
2056
2057err_restore_mtu:
2058 dev_set_mtu(slave_dev, new_slave->original_mtu);
2059
2060err_free:
2061 kobject_put(&new_slave->kobj);
2062
2063err_undo_flags:
2064 /* Enslaving the first slave has failed and we need to fix the master's mac */
2065 if (!bond_has_slaves(bond)) {
2066 if (ether_addr_equal_64bits(bond_dev->dev_addr,
2067 slave_dev->dev_addr))
2068 eth_hw_addr_random(bond_dev);
2069 if (bond_dev->type != ARPHRD_ETHER) {
2070 dev_close(bond_dev);
2071 ether_setup(bond_dev);
2072 bond_dev->flags |= IFF_MASTER;
2073 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2074 }
2075 }
2076
2077 return res;
2078}
2079
2080/* Try to release the slave device <slave> from the bond device <master>
2081 * It is legal to access curr_active_slave without a lock because the entire function
2082 * is RTNL-locked. If "all" is true it means that the function is being called
2083 * while destroying a bond interface and all slaves are being released.
2084 *
2085 * The rules for slave state should be:
2086 * for Active/Backup:
2087 * Active stays on, all backups go down
2088 * for Bonded connections:
2089 * The first up interface should be left on and all others downed.
2090 */
2091static int __bond_release_one(struct net_device *bond_dev,
2092 struct net_device *slave_dev,
2093 bool all, bool unregister)
2094{
2095 struct bonding *bond = netdev_priv(bond_dev);
2096 struct slave *slave, *oldcurrent;
2097 struct sockaddr_storage ss;
2098 int old_flags = bond_dev->flags;
2099 netdev_features_t old_features = bond_dev->features;
2100
2101 /* slave is not a slave or master is not master of this slave */
2102 if (!(slave_dev->flags & IFF_SLAVE) ||
2103 !netdev_has_upper_dev(slave_dev, bond_dev)) {
2104 slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
2105 return -EINVAL;
2106 }
2107
2108 block_netpoll_tx();
2109
2110 slave = bond_get_slave_by_dev(bond, slave_dev);
2111 if (!slave) {
2112 /* not a slave of this bond */
2113 slave_info(bond_dev, slave_dev, "interface not enslaved\n");
2114 unblock_netpoll_tx();
2115 return -EINVAL;
2116 }
2117
2118 bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
2119
2120 bond_sysfs_slave_del(slave);
2121
2122 /* recompute stats just before removing the slave */
2123 bond_get_stats(bond->dev, &bond->bond_stats);
2124
2125 bond_upper_dev_unlink(bond, slave);
2126 /* unregister rx_handler early so bond_handle_frame wouldn't be called
2127 * for this slave anymore.
2128 */
2129 netdev_rx_handler_unregister(slave_dev);
2130
2131 if (BOND_MODE(bond) == BOND_MODE_8023AD)
2132 bond_3ad_unbind_slave(slave);
2133
2134 if (bond_mode_can_use_xmit_hash(bond))
2135 bond_update_slave_arr(bond, slave);
2136
2137 slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
2138 bond_is_active_slave(slave) ? "active" : "backup");
2139
2140 oldcurrent = rcu_access_pointer(bond->curr_active_slave);
2141
2142 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2143
2144 if (!all && (!bond->params.fail_over_mac ||
2145 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
2146 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
2147 bond_has_slaves(bond))
2148 slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
2149 slave->perm_hwaddr);
2150 }
2151
2152 if (rtnl_dereference(bond->primary_slave) == slave)
2153 RCU_INIT_POINTER(bond->primary_slave, NULL);
2154
2155 if (oldcurrent == slave)
2156 bond_change_active_slave(bond, NULL);
2157
2158 if (bond_is_lb(bond)) {
2159 /* Must be called only after the slave has been
2160 * detached from the list and the curr_active_slave
2161 * has been cleared (if our_slave == old_current),
2162 * but before a new active slave is selected.
2163 */
2164 bond_alb_deinit_slave(bond, slave);
2165 }
2166
2167 if (all) {
2168 RCU_INIT_POINTER(bond->curr_active_slave, NULL);
2169 } else if (oldcurrent == slave) {
2170 /* Note that we hold RTNL over this sequence, so there
2171 * is no concern that another slave add/remove event
2172 * will interfere.
2173 */
2174 bond_select_active_slave(bond);
2175 }
2176
2177 if (!bond_has_slaves(bond)) {
2178 bond_set_carrier(bond);
2179 eth_hw_addr_random(bond_dev);
2180 }
2181
2182 unblock_netpoll_tx();
2183 synchronize_rcu();
2184 bond->slave_cnt--;
2185
2186 if (!bond_has_slaves(bond)) {
2187 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2188 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2189 }
2190
2191 bond_compute_features(bond);
2192 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2193 (old_features & NETIF_F_VLAN_CHALLENGED))
2194 slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
2195
2196 vlan_vids_del_by_dev(slave_dev, bond_dev);
2197
2198 /* If the mode uses primary, then this case was handled above by
2199 * bond_change_active_slave(..., NULL)
2200 */
2201 if (!bond_uses_primary(bond)) {
2202 /* unset promiscuity level from slave
2203 * NOTE: The NETDEV_CHANGEADDR call above may change the value
2204 * of the IFF_PROMISC flag in the bond_dev, but we need the
2205 * value of that flag before that change, as that was the value
2206 * when this slave was attached, so we cache at the start of the
2207 * function and use it here. Same goes for ALLMULTI below
2208 */
2209 if (old_flags & IFF_PROMISC)
2210 dev_set_promiscuity(slave_dev, -1);
2211
2212 /* unset allmulti level from slave */
2213 if (old_flags & IFF_ALLMULTI)
2214 dev_set_allmulti(slave_dev, -1);
2215
2216 bond_hw_addr_flush(bond_dev, slave_dev);
2217 }
2218
2219 slave_disable_netpoll(slave);
2220
2221 /* close slave before restoring its mac address */
2222 dev_close(slave_dev);
2223
2224 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2225 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2226 /* restore original ("permanent") mac address */
2227 bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
2228 slave->dev->addr_len);
2229 ss.ss_family = slave_dev->type;
2230 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2231 }
2232
2233 if (unregister)
2234 __dev_set_mtu(slave_dev, slave->original_mtu);
2235 else
2236 dev_set_mtu(slave_dev, slave->original_mtu);
2237
2238 if (!netif_is_bond_master(slave_dev))
2239 slave_dev->priv_flags &= ~IFF_BONDING;
2240
2241 kobject_put(&slave->kobj);
2242
2243 return 0;
2244}
2245
2246/* A wrapper used because of ndo_del_link */
2247int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2248{
2249 return __bond_release_one(bond_dev, slave_dev, false, false);
2250}
2251
2252/* First release a slave and then destroy the bond if no more slaves are left.
2253 * Must be under rtnl_lock when this function is called.
2254 */
2255static int bond_release_and_destroy(struct net_device *bond_dev,
2256 struct net_device *slave_dev)
2257{
2258 struct bonding *bond = netdev_priv(bond_dev);
2259 int ret;
2260
2261 ret = __bond_release_one(bond_dev, slave_dev, false, true);
2262 if (ret == 0 && !bond_has_slaves(bond) &&
2263 bond_dev->reg_state != NETREG_UNREGISTERING) {
2264 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2265 netdev_info(bond_dev, "Destroying bond\n");
2266 bond_remove_proc_entry(bond);
2267 unregister_netdevice(bond_dev);
2268 }
2269 return ret;
2270}
2271
2272static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2273{
2274 struct bonding *bond = netdev_priv(bond_dev);
2275 bond_fill_ifbond(bond, info);
2276}
2277
2278static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2279{
2280 struct bonding *bond = netdev_priv(bond_dev);
2281 struct list_head *iter;
2282 int i = 0, res = -ENODEV;
2283 struct slave *slave;
2284
2285 bond_for_each_slave(bond, slave, iter) {
2286 if (i++ == (int)info->slave_id) {
2287 res = 0;
2288 bond_fill_ifslave(slave, info);
2289 break;
2290 }
2291 }
2292
2293 return res;
2294}
2295
2296/*-------------------------------- Monitoring -------------------------------*/
2297
2298/* called with rcu_read_lock() */
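/* First (inspection) phase of the MII monitor: walk the slaves under RCU,
 * compare each slave's reported carrier state against its current link
 * state and propose BOND_LINK_UP/DOWN/FAIL/BACK transitions, honouring
 * updelay and downdelay. Returns the number of proposed changes; the
 * caller commits them later under RTNL.
 */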
2299static int bond_miimon_inspect(struct bonding *bond)
2300{
2301 int link_state, commit = 0;
2302 struct list_head *iter;
2303 struct slave *slave;
2304 bool ignore_updelay;
2305
2306 ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2307
2308 bond_for_each_slave_rcu(bond, slave, iter) {
2309 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2310
2311 link_state = bond_check_dev_link(bond, slave->dev, 0);
2312
2313 switch (slave->link) {
2314 case BOND_LINK_UP:
2315 if (link_state)
2316 continue;
2317
2318 bond_propose_link_state(slave, BOND_LINK_FAIL);
2319 commit++;
2320 slave->delay = bond->params.downdelay;
2321 if (slave->delay) {
2322 slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
2323 (BOND_MODE(bond) ==
2324 BOND_MODE_ACTIVEBACKUP) ?
2325 (bond_is_active_slave(slave) ?
2326 "active " : "backup ") : "",
2327 bond->params.downdelay * bond->params.miimon);
2328 }
2329 fallthrough;
2330 case BOND_LINK_FAIL:
2331 if (link_state) {
2332 /* recovered before downdelay expired */
2333 bond_propose_link_state(slave, BOND_LINK_UP);
2334 slave->last_link_up = jiffies;
2335 slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
2336 (bond->params.downdelay - slave->delay) *
2337 bond->params.miimon);
2338 commit++;
2339 continue;
2340 }
2341
2342 if (slave->delay <= 0) {
2343 bond_propose_link_state(slave, BOND_LINK_DOWN);
2344 commit++;
2345 continue;
2346 }
2347
2348 slave->delay--;
2349 break;
2350
2351 case BOND_LINK_DOWN:
2352 if (!link_state)
2353 continue;
2354
2355 bond_propose_link_state(slave, BOND_LINK_BACK);
2356 commit++;
2357 slave->delay = bond->params.updelay;
2358
2359 if (slave->delay) {
2360 slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
2361 ignore_updelay ? 0 :
2362 bond->params.updelay *
2363 bond->params.miimon);
2364 }
2365 fallthrough;
2366 case BOND_LINK_BACK:
2367 if (!link_state) {
2368 bond_propose_link_state(slave, BOND_LINK_DOWN);
2369 slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
2370 (bond->params.updelay - slave->delay) *
2371 bond->params.miimon);
2372 commit++;
2373 continue;
2374 }
2375
2376 if (ignore_updelay)
2377 slave->delay = 0;
2378
2379 if (slave->delay <= 0) {
2380 bond_propose_link_state(slave, BOND_LINK_UP);
2381 commit++;
2382 ignore_updelay = false;
2383 continue;
2384 }
2385
2386 slave->delay--;
2387 break;
2388 }
2389 }
2390
2391 return commit;
2392}
2393
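/* Let the mode-specific code react to a definite link change: 802.3ad and
 * ALB/TLB get a per-slave callback, XOR mode just rebuilds the xmit slave
 * array.
 */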
2394static void bond_miimon_link_change(struct bonding *bond,
2395 struct slave *slave,
2396 char link)
2397{
2398 switch (BOND_MODE(bond)) {
2399 case BOND_MODE_8023AD:
2400 bond_3ad_handle_link_change(slave, link);
2401 break;
2402 case BOND_MODE_TLB:
2403 case BOND_MODE_ALB:
2404 bond_alb_handle_link_change(bond, slave, link);
2405 break;
2406 case BOND_MODE_XOR:
2407 bond_update_slave_arr(bond, NULL);
2408 break;
2409 }
2410}
2411
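/* Second (commit) phase of the MII monitor: apply the link states proposed
 * by bond_miimon_inspect(). Runs under RTNL, so it can update speed/duplex,
 * flip active/backup flags, notify the mode-specific code and re-select the
 * active slave when a failover is needed.
 */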
2412static void bond_miimon_commit(struct bonding *bond)
2413{
2414 struct list_head *iter;
2415 struct slave *slave, *primary;
2416
2417 bond_for_each_slave(bond, slave, iter) {
2418 switch (slave->link_new_state) {
2419 case BOND_LINK_NOCHANGE:
2420 /* For 802.3ad mode, check current slave speed and
2421 * duplex again in case its port was disabled after
2422 * invalid speed/duplex reporting but recovered before
2423 * link monitoring could make a decision on the actual
2424 * link status
2425 */
2426 if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2427 slave->link == BOND_LINK_UP)
2428 bond_3ad_adapter_speed_duplex_changed(slave);
2429 continue;
2430
2431 case BOND_LINK_UP:
2432 if (bond_update_speed_duplex(slave) &&
2433 bond_needs_speed_duplex(bond)) {
2434 slave->link = BOND_LINK_DOWN;
2435 if (net_ratelimit())
2436 slave_warn(bond->dev, slave->dev,
2437 "failed to get link speed/duplex\n");
2438 continue;
2439 }
2440 bond_set_slave_link_state(slave, BOND_LINK_UP,
2441 BOND_SLAVE_NOTIFY_NOW);
2442 slave->last_link_up = jiffies;
2443
2444 primary = rtnl_dereference(bond->primary_slave);
2445 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2446 /* prevent it from being the active one */
2447 bond_set_backup_slave(slave);
2448 } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2449 /* make it immediately active */
2450 bond_set_active_slave(slave);
2451 }
2452
2453 slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2454 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2455 slave->duplex ? "full" : "half");
2456
2457 bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2458
2459 if (!bond->curr_active_slave || slave == primary)
2460 goto do_failover;
2461
2462 continue;
2463
2464 case BOND_LINK_DOWN:
2465 if (slave->link_failure_count < UINT_MAX)
2466 slave->link_failure_count++;
2467
2468 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2469 BOND_SLAVE_NOTIFY_NOW);
2470
2471 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2472 BOND_MODE(bond) == BOND_MODE_8023AD)
2473 bond_set_slave_inactive_flags(slave,
2474 BOND_SLAVE_NOTIFY_NOW);
2475
2476 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2477
2478 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2479
2480 if (slave == rcu_access_pointer(bond->curr_active_slave))
2481 goto do_failover;
2482
2483 continue;
2484
2485 default:
2486 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2487 slave->link_new_state);
2488 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2489
2490 continue;
2491 }
2492
2493do_failover:
2494 block_netpoll_tx();
2495 bond_select_active_slave(bond);
2496 unblock_netpoll_tx();
2497 }
2498
2499 bond_set_carrier(bond);
2500}
2501
2502/* bond_mii_monitor
2503 *
2504 * Really a wrapper that splits the mii monitor into two phases: an
2505 * inspection, then (if inspection indicates something needs to be done)
2506 * an acquisition of appropriate locks followed by a commit phase to
2507 * implement whatever link state changes are indicated.
2508 */
2509static void bond_mii_monitor(struct work_struct *work)
2510{
2511 struct bonding *bond = container_of(work, struct bonding,
2512 mii_work.work);
2513 bool should_notify_peers = false;
2514 bool commit;
2515 unsigned long delay;
2516 struct slave *slave;
2517 struct list_head *iter;
2518
2519 delay = msecs_to_jiffies(bond->params.miimon);
2520
2521 if (!bond_has_slaves(bond))
2522 goto re_arm;
2523
2524 rcu_read_lock();
2525 should_notify_peers = bond_should_notify_peers(bond);
2526 commit = !!bond_miimon_inspect(bond);
2527 if (bond->send_peer_notif) {
2528 rcu_read_unlock();
2529 if (rtnl_trylock()) {
2530 bond->send_peer_notif--;
2531 rtnl_unlock();
2532 }
2533 } else {
2534 rcu_read_unlock();
2535 }
2536
2537 if (commit) {
2538 /* Race avoidance with bond_close cancel of workqueue */
2539 if (!rtnl_trylock()) {
2540 delay = 1;
2541 should_notify_peers = false;
2542 goto re_arm;
2543 }
2544
2545 bond_for_each_slave(bond, slave, iter) {
2546 bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
2547 }
2548 bond_miimon_commit(bond);
2549
2550 rtnl_unlock(); /* might sleep, hold no other locks */
2551 }
2552
2553re_arm:
2554 if (bond->params.miimon)
2555 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2556
2557 if (should_notify_peers) {
2558 if (!rtnl_trylock())
2559 return;
2560 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2561 rtnl_unlock();
2562 }
2563}
2564
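/* Helper for bond_has_this_ip(): invoked for every upper device stacked on
 * the bond (e.g. VLAN devices) and returns nonzero when that device owns
 * the IP address being looked up.
 */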
2565static int bond_upper_dev_walk(struct net_device *upper,
2566 struct netdev_nested_priv *priv)
2567{
2568 __be32 ip = *(__be32 *)priv->data;
2569
2570 return ip == bond_confirm_addr(upper, 0, ip);
2571}
2572
2573static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2574{
2575 struct netdev_nested_priv priv = {
2576 .data = (void *)&ip,
2577 };
2578 bool ret = false;
2579
2580 if (ip == bond_confirm_addr(bond->dev, 0, ip))
2581 return true;
2582
2583 rcu_read_lock();
2584 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
2585 ret = true;
2586 rcu_read_unlock();
2587
2588 return ret;
2589}
2590
2591/* We go to the (large) trouble of VLAN tagging ARP frames because
2592 * switches in VLAN mode (especially if ports are configured as
2593 * "native" to a VLAN) might not pass non-tagged frames.
2594 */
2595static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
2596 __be32 src_ip, struct bond_vlan_tag *tags)
2597{
2598 struct sk_buff *skb;
2599 struct bond_vlan_tag *outer_tag = tags;
2600 struct net_device *slave_dev = slave->dev;
2601 struct net_device *bond_dev = slave->bond->dev;
2602
2603 slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
2604 arp_op, &dest_ip, &src_ip);
2605
2606 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2607 NULL, slave_dev->dev_addr, NULL);
2608
2609 if (!skb) {
2610 net_err_ratelimited("ARP packet allocation failed\n");
2611 return;
2612 }
2613
2614 if (!tags || tags->vlan_proto == VLAN_N_VID)
2615 goto xmit;
2616
2617 tags++;
2618
2619 /* Go through all the tags backwards and add them to the packet */
2620 while (tags->vlan_proto != VLAN_N_VID) {
2621 if (!tags->vlan_id) {
2622 tags++;
2623 continue;
2624 }
2625
2626 slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
2627 ntohs(outer_tag->vlan_proto), tags->vlan_id);
2628 skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2629 tags->vlan_id);
2630 if (!skb) {
2631 net_err_ratelimited("failed to insert inner VLAN tag\n");
2632 return;
2633 }
2634
2635 tags++;
2636 }
2637 /* Set the outer tag */
2638 if (outer_tag->vlan_id) {
2639 slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
2640 ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2641 __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2642 outer_tag->vlan_id);
2643 }
2644
2645xmit:
2646 arp_xmit(skb);
2647}
2648
2649/* Validate the device path between the @start_dev and the @end_dev.
2650 * The path is valid if the @end_dev is reachable through device
2651 * stacking.
2652 * When the path is validated, collect any vlan information in the
2653 * path.
2654 */
2655struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2656 struct net_device *end_dev,
2657 int level)
2658{
2659 struct bond_vlan_tag *tags;
2660 struct net_device *upper;
2661 struct list_head *iter;
2662
2663 if (start_dev == end_dev) {
2664 tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2665 if (!tags)
2666 return ERR_PTR(-ENOMEM);
2667 tags[level].vlan_proto = VLAN_N_VID;
2668 return tags;
2669 }
2670
2671 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2672 tags = bond_verify_device_path(upper, end_dev, level + 1);
2673 if (IS_ERR_OR_NULL(tags)) {
2674 if (IS_ERR(tags))
2675 return tags;
2676 continue;
2677 }
2678 if (is_vlan_dev(upper)) {
2679 tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
2680 tags[level].vlan_id = vlan_dev_vlan_id(upper);
2681 }
2682
2683 return tags;
2684 }
2685
2686 return NULL;
2687}
2688
2689static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2690{
2691 struct rtable *rt;
2692 struct bond_vlan_tag *tags;
2693 __be32 *targets = bond->params.arp_targets, addr;
2694 int i;
2695
2696 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
2697 slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
2698 __func__, &targets[i]);
2699 tags = NULL;
2700
2701 /* Find out through which dev the packet should go */
2702 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
2703 RTO_ONLINK, 0);
2704 if (IS_ERR(rt)) {
2705 /* there's no route to target - try to send arp
2706 * probe to generate any traffic (arp_validate=0)
2707 */
2708 if (bond->params.arp_validate)
2709 net_warn_ratelimited("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
2710 bond->dev->name,
2711 &targets[i]);
2712 bond_arp_send(slave, ARPOP_REQUEST, targets[i],
2713 0, tags);
2714 continue;
2715 }
2716
2717 /* bond device itself */
2718 if (rt->dst.dev == bond->dev)
2719 goto found;
2720
2721 rcu_read_lock();
2722 tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
2723 rcu_read_unlock();
2724
2725 if (!IS_ERR_OR_NULL(tags))
2726 goto found;
2727
2728 /* Not our device - skip */
2729 slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
2730 &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
2731
2732 ip_rt_put(rt);
2733 continue;
2734
2735found:
2736 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
2737 ip_rt_put(rt);
2738 bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
2739 kfree(tags);
2740 }
2741}
2742
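/* Record a validated ARP on this slave: update last_rx and the per-target
 * timestamp when the source IP matches one of the configured arp_ip_targets
 * and the target IP belongs to the bond.
 */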
2743static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
2744{
2745 int i;
2746
2747 if (!sip || !bond_has_this_ip(bond, tip)) {
2748 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
2749 __func__, &sip, &tip);
2750 return;
2751 }
2752
2753 i = bond_get_targets_ip(bond->params.arp_targets, sip);
2754 if (i == -1) {
2755 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
2756 __func__, &sip);
2757 return;
2758 }
2759 slave->last_rx = jiffies;
2760 slave->target_last_arp_rx[i] = jiffies;
2761}
2762
2763int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2764 struct slave *slave)
2765{
2766 struct arphdr *arp = (struct arphdr *)skb->data;
2767 struct slave *curr_active_slave, *curr_arp_slave;
2768 unsigned char *arp_ptr;
2769 __be32 sip, tip;
2770 int is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
2771 unsigned int alen;
2772
2773 if (!slave_do_arp_validate(bond, slave)) {
2774 if ((slave_do_arp_validate_only(bond) && is_arp) ||
2775 !slave_do_arp_validate_only(bond))
2776 slave->last_rx = jiffies;
2777 return RX_HANDLER_ANOTHER;
2778 } else if (!is_arp) {
2779 return RX_HANDLER_ANOTHER;
2780 }
2781
2782 alen = arp_hdr_len(bond->dev);
2783
2784 slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
2785 __func__, skb->dev->name);
2786
2787 if (alen > skb_headlen(skb)) {
2788 arp = kmalloc(alen, GFP_ATOMIC);
2789 if (!arp)
2790 goto out_unlock;
2791 if (skb_copy_bits(skb, 0, arp, alen) < 0)
2792 goto out_unlock;
2793 }
2794
2795 if (arp->ar_hln != bond->dev->addr_len ||
2796 skb->pkt_type == PACKET_OTHERHOST ||
2797 skb->pkt_type == PACKET_LOOPBACK ||
2798 arp->ar_hrd != htons(ARPHRD_ETHER) ||
2799 arp->ar_pro != htons(ETH_P_IP) ||
2800 arp->ar_pln != 4)
2801 goto out_unlock;
2802
2803 arp_ptr = (unsigned char *)(arp + 1);
2804 arp_ptr += bond->dev->addr_len;
2805 memcpy(&sip, arp_ptr, 4);
2806 arp_ptr += 4 + bond->dev->addr_len;
2807 memcpy(&tip, arp_ptr, 4);
2808
2809 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
2810 __func__, slave->dev->name, bond_slave_state(slave),
2811 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
2812 &sip, &tip);
2813
2814 curr_active_slave = rcu_dereference(bond->curr_active_slave);
2815 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
2816
2817 /* We 'trust' the received ARP enough to validate it if:
2818 *
2819 * (a) the slave receiving the ARP is active (which includes the
2820 * current ARP slave, if any), or
2821 *
2822 * (b) the receiving slave isn't active, but there is a currently
2823 * active slave and it received valid arp reply(s) after it became
2824 * the currently active slave, or
2825 *
2826 * (c) there is an ARP slave that sent an ARP during the prior ARP
2827 * interval, and we receive an ARP reply on any slave. We accept
2828 * these because switch FDB update delays may deliver the ARP
2829 * reply to a slave other than the sender of the ARP request.
2830 *
2831 * Note: for (b), backup slaves are receiving the broadcast ARP
2832 * request, not a reply. This request passes from the sending
2833 * slave through the L2 switch(es) to the receiving slave. Since
2834 * this is checking the request, sip/tip are swapped for
2835 * validation.
2836 *
2837 * This is done to avoid endless looping when we can't reach the
2838 * arp_ip_target and fool ourselves with our own arp requests.
2839 */
2840 if (bond_is_active_slave(slave))
2841 bond_validate_arp(bond, slave, sip, tip);
2842 else if (curr_active_slave &&
2843 time_after(slave_last_rx(bond, curr_active_slave),
2844 curr_active_slave->last_link_up))
2845 bond_validate_arp(bond, slave, tip, sip);
2846 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
2847 bond_time_in_interval(bond,
2848 dev_trans_start(curr_arp_slave->dev), 1))
2849 bond_validate_arp(bond, slave, sip, tip);
2850
2851out_unlock:
2852 if (arp != (struct arphdr *)skb->data)
2853 kfree(arp);
2854 return RX_HANDLER_ANOTHER;
2855}
2856
2857/* function to verify if we're in the arp_interval timeslice, returns true if
2858 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
2859 * arp_interval/2). The arp_interval/2 is needed for really fast networks.
2860 */
2861static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
2862 int mod)
2863{
2864 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
2865
2866 return time_in_range(jiffies,
2867 last_act - delta_in_ticks,
2868 last_act + mod * delta_in_ticks + delta_in_ticks/2);
2869}
2870
2871/* This function is called regularly to monitor each slave's link
2872 * ensuring that traffic is being sent and received when arp monitoring
2873 * is used in load-balancing mode. if the adapter has been dormant, then an
2874 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
2875 * arp monitoring in active backup mode.
2876 */
2877static void bond_loadbalance_arp_mon(struct bonding *bond)
2878{
2879 struct slave *slave, *oldcurrent;
2880 struct list_head *iter;
2881 int do_failover = 0, slave_state_changed = 0;
2882
2883 if (!bond_has_slaves(bond))
2884 goto re_arm;
2885
2886 rcu_read_lock();
2887
2888 oldcurrent = rcu_dereference(bond->curr_active_slave);
2889 /* see if any of the previous devices are up now (i.e. they have
2890 * xmt and rcv traffic). the curr_active_slave does not come into
2891 * the picture unless it is null. also, slave->last_link_up is not
2892 * needed here because we send an arp on each slave and give a slave
2893 * as long as it needs to get the tx/rx within the delta.
2894 * TODO: what about up/down delay in arp mode? it wasn't here before
2895 * so it can wait
2896 */
2897 bond_for_each_slave_rcu(bond, slave, iter) {
2898 unsigned long trans_start = dev_trans_start(slave->dev);
2899
2900 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2901
2902 if (slave->link != BOND_LINK_UP) {
2903 if (bond_time_in_interval(bond, trans_start, 1) &&
2904 bond_time_in_interval(bond, slave->last_rx, 1)) {
2905
2906 bond_propose_link_state(slave, BOND_LINK_UP);
2907 slave_state_changed = 1;
2908
2909 /* primary_slave has no meaning in round-robin
2910 * mode. the window of a slave being up and
2911 * curr_active_slave being null after enslaving
2912 * is closed.
2913 */
2914 if (!oldcurrent) {
2915 slave_info(bond->dev, slave->dev, "link status definitely up\n");
2916 do_failover = 1;
2917 } else {
2918 slave_info(bond->dev, slave->dev, "interface is now up\n");
2919 }
2920 }
2921 } else {
2922 /* slave->link == BOND_LINK_UP */
2923
2924 /* not all switches will respond to an arp request
2925 * when the source ip is 0, so don't take the link down
2926 * if we don't know our ip yet
2927 */
2928 if (!bond_time_in_interval(bond, trans_start, 2) ||
2929 !bond_time_in_interval(bond, slave->last_rx, 2)) {
2930
2931 bond_propose_link_state(slave, BOND_LINK_DOWN);
2932 slave_state_changed = 1;
2933
2934 if (slave->link_failure_count < UINT_MAX)
2935 slave->link_failure_count++;
2936
2937 slave_info(bond->dev, slave->dev, "interface is now down\n");
2938
2939 if (slave == oldcurrent)
2940 do_failover = 1;
2941 }
2942 }
2943
2944 /* note: if switch is in round-robin mode, all links
2945 * must tx arp to ensure all links rx an arp - otherwise
2946 * links may oscillate or not come up at all; if switch is
2947 * in something like xor mode, there is nothing we can
2948 * do - all replies will be rx'ed on same link causing slaves
2949 * to be unstable during low/no traffic periods
2950 */
2951 if (bond_slave_is_up(slave))
2952 bond_arp_send_all(bond, slave);
2953 }
2954
2955 rcu_read_unlock();
2956
2957 if (do_failover || slave_state_changed) {
2958 if (!rtnl_trylock())
2959 goto re_arm;
2960
2961 bond_for_each_slave(bond, slave, iter) {
2962 if (slave->link_new_state != BOND_LINK_NOCHANGE)
2963 slave->link = slave->link_new_state;
2964 }
2965
2966 if (slave_state_changed) {
2967 bond_slave_state_change(bond);
2968 if (BOND_MODE(bond) == BOND_MODE_XOR)
2969 bond_update_slave_arr(bond, NULL);
2970 }
2971 if (do_failover) {
2972 block_netpoll_tx();
2973 bond_select_active_slave(bond);
2974 unblock_netpoll_tx();
2975 }
2976 rtnl_unlock();
2977 }
2978
2979re_arm:
2980 if (bond->params.arp_interval)
2981 queue_delayed_work(bond->wq, &bond->arp_work,
2982 msecs_to_jiffies(bond->params.arp_interval));
2983}
2984
2985/* Called to inspect slaves for active-backup mode ARP monitor link state
2986 * changes. Sets proposed link state in slaves to specify what action
2987 * should take place for the slave. Returns 0 if no changes are found, >0
2988 * if changes to link states must be committed.
2989 *
2990 * Called with rcu_read_lock held.
2991 */
2992static int bond_ab_arp_inspect(struct bonding *bond)
2993{
2994 unsigned long trans_start, last_rx;
2995 struct list_head *iter;
2996 struct slave *slave;
2997 int commit = 0;
2998
2999 bond_for_each_slave_rcu(bond, slave, iter) {
3000 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3001 last_rx = slave_last_rx(bond, slave);
3002
3003 if (slave->link != BOND_LINK_UP) {
3004 if (bond_time_in_interval(bond, last_rx, 1)) {
3005 bond_propose_link_state(slave, BOND_LINK_UP);
3006 commit++;
3007 } else if (slave->link == BOND_LINK_BACK) {
3008 bond_propose_link_state(slave, BOND_LINK_FAIL);
3009 commit++;
3010 }
3011 continue;
3012 }
3013
3014 /* Give slaves 2*delta after being enslaved or made
3015 * active. This avoids bouncing, as the last receive
3016 * times need a full ARP monitor cycle to be updated.
3017 */
3018 if (bond_time_in_interval(bond, slave->last_link_up, 2))
3019 continue;
3020
3021 /* Backup slave is down if:
3022 * - No current_arp_slave AND
3023 * - more than 3*delta since last receive AND
3024 * - the bond has an IP address
3025 *
3026 * Note: a non-null current_arp_slave indicates
3027 * the curr_active_slave went down and we are
3028 * searching for a new one; under this condition
3029 * we only take the curr_active_slave down - this
3030 * gives each slave a chance to tx/rx traffic
3031 * before being taken out
3032 */
3033 if (!bond_is_active_slave(slave) &&
3034 !rcu_access_pointer(bond->current_arp_slave) &&
3035 !bond_time_in_interval(bond, last_rx, 3)) {
3036 bond_propose_link_state(slave, BOND_LINK_DOWN);
3037 commit++;
3038 }
3039
3040 /* Active slave is down if:
3041 * - more than 2*delta since transmitting OR
3042 * - (more than 2*delta since receive AND
3043 * the bond has an IP address)
3044 */
3045 trans_start = dev_trans_start(slave->dev);
3046 if (bond_is_active_slave(slave) &&
3047 (!bond_time_in_interval(bond, trans_start, 2) ||
3048 !bond_time_in_interval(bond, last_rx, 2))) {
3049 bond_propose_link_state(slave, BOND_LINK_DOWN);
3050 commit++;
3051 }
3052 }
3053
3054 return commit;
3055}
3056
3057/* Called to commit link state changes noted by inspection step of
3058 * active-backup mode ARP monitor.
3059 *
3060 * Called with RTNL hold.
3061 */
3062static void bond_ab_arp_commit(struct bonding *bond)
3063{
3064 unsigned long trans_start;
3065 struct list_head *iter;
3066 struct slave *slave;
3067
3068 bond_for_each_slave(bond, slave, iter) {
3069 switch (slave->link_new_state) {
3070 case BOND_LINK_NOCHANGE:
3071 continue;
3072
3073 case BOND_LINK_UP:
3074 trans_start = dev_trans_start(slave->dev);
3075 if (rtnl_dereference(bond->curr_active_slave) != slave ||
3076 (!rtnl_dereference(bond->curr_active_slave) &&
3077 bond_time_in_interval(bond, trans_start, 1))) {
3078 struct slave *current_arp_slave;
3079
3080 current_arp_slave = rtnl_dereference(bond->current_arp_slave);
3081 bond_set_slave_link_state(slave, BOND_LINK_UP,
3082 BOND_SLAVE_NOTIFY_NOW);
3083 if (current_arp_slave) {
3084 bond_set_slave_inactive_flags(
3085 current_arp_slave,
3086 BOND_SLAVE_NOTIFY_NOW);
3087 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3088 }
3089
3090 slave_info(bond->dev, slave->dev, "link status definitely up\n");
3091
3092 if (!rtnl_dereference(bond->curr_active_slave) ||
3093 slave == rtnl_dereference(bond->primary_slave))
3094 goto do_failover;
3095
3096 }
3097
3098 continue;
3099
3100 case BOND_LINK_DOWN:
3101 if (slave->link_failure_count < UINT_MAX)
3102 slave->link_failure_count++;
3103
3104 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3105 BOND_SLAVE_NOTIFY_NOW);
3106 bond_set_slave_inactive_flags(slave,
3107 BOND_SLAVE_NOTIFY_NOW);
3108
3109 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
3110
3111 if (slave == rtnl_dereference(bond->curr_active_slave)) {
3112 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3113 goto do_failover;
3114 }
3115
3116 continue;
3117
3118 case BOND_LINK_FAIL:
3119 bond_set_slave_link_state(slave, BOND_LINK_FAIL,
3120 BOND_SLAVE_NOTIFY_NOW);
3121 bond_set_slave_inactive_flags(slave,
3122 BOND_SLAVE_NOTIFY_NOW);
3123
3124 /* A slave has just been enslaved and has become
3125 * the current active slave.
3126 */
3127 if (rtnl_dereference(bond->curr_active_slave))
3128 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3129 continue;
3130
3131 default:
3132 slave_err(bond->dev, slave->dev,
3133 "impossible: link_new_state %d on slave\n",
3134 slave->link_new_state);
3135 continue;
3136 }
3137
3138do_failover:
3139 block_netpoll_tx();
3140 bond_select_active_slave(bond);
3141 unblock_netpoll_tx();
3142 }
3143
3144 bond_set_carrier(bond);
3145}
3146
3147/* Send ARP probes for active-backup mode ARP monitor.
3148 *
3149 * Called with rcu_read_lock held.
3150 */
3151static bool bond_ab_arp_probe(struct bonding *bond)
3152{
3153 struct slave *slave, *before = NULL, *new_slave = NULL,
3154 *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
3155 *curr_active_slave = rcu_dereference(bond->curr_active_slave);
3156 struct list_head *iter;
3157 bool found = false;
3158 bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
3159
3160 if (curr_arp_slave && curr_active_slave)
3161 netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
3162 curr_arp_slave->dev->name,
3163 curr_active_slave->dev->name);
3164
3165 if (curr_active_slave) {
3166 bond_arp_send_all(bond, curr_active_slave);
3167 return should_notify_rtnl;
3168 }
3169
3170 /* if we don't have a curr_active_slave, search for the next available
3171 * backup slave from the current_arp_slave and make it the candidate
3172 * for becoming the curr_active_slave
3173 */
3174
3175 if (!curr_arp_slave) {
3176 curr_arp_slave = bond_first_slave_rcu(bond);
3177 if (!curr_arp_slave)
3178 return should_notify_rtnl;
3179 }
3180
3181 bond_for_each_slave_rcu(bond, slave, iter) {
3182 if (!found && !before && bond_slave_is_up(slave))
3183 before = slave;
3184
3185 if (found && !new_slave && bond_slave_is_up(slave))
3186 new_slave = slave;
3187 /* if the link state is up at this point, we
3188 * mark it down - this can happen if we have
3189 * simultaneous link failures and
3190 * reselect_active_interface doesn't make this
3191 * one the current slave so it is still marked
3192 * up when it is actually down
3193 */
3194 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3195 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3196 BOND_SLAVE_NOTIFY_LATER);
3197 if (slave->link_failure_count < UINT_MAX)
3198 slave->link_failure_count++;
3199
3200 bond_set_slave_inactive_flags(slave,
3201 BOND_SLAVE_NOTIFY_LATER);
3202
3203 slave_info(bond->dev, slave->dev, "backup interface is now down\n");
3204 }
3205 if (slave == curr_arp_slave)
3206 found = true;
3207 }
3208
3209 if (!new_slave && before)
3210 new_slave = before;
3211
3212 if (!new_slave)
3213 goto check_state;
3214
3215 bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
3216 BOND_SLAVE_NOTIFY_LATER);
3217 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
3218 bond_arp_send_all(bond, new_slave);
3219 new_slave->last_link_up = jiffies;
3220 rcu_assign_pointer(bond->current_arp_slave, new_slave);
3221
3222check_state:
3223 bond_for_each_slave_rcu(bond, slave, iter) {
3224 if (slave->should_notify || slave->should_notify_link) {
3225 should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
3226 break;
3227 }
3228 }
3229 return should_notify_rtnl;
3230}
3231
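/* Active-backup flavour of the ARP monitor, called from bond_arp_monitor().
 * Mirrors the MII monitor split: inspect slaves under RCU, commit proposed
 * link changes under RTNL, then send ARP probes and emit any deferred
 * peer/RTNL notifications.
 */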
3232static void bond_activebackup_arp_mon(struct bonding *bond)
3233{
3234 bool should_notify_peers = false;
3235 bool should_notify_rtnl = false;
3236 int delta_in_ticks;
3237
3238 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3239
3240 if (!bond_has_slaves(bond))
3241 goto re_arm;
3242
3243 rcu_read_lock();
3244
3245 should_notify_peers = bond_should_notify_peers(bond);
3246
3247 if (bond_ab_arp_inspect(bond)) {
3248 rcu_read_unlock();
3249
3250 /* Race avoidance with bond_close flush of workqueue */
3251 if (!rtnl_trylock()) {
3252 delta_in_ticks = 1;
3253 should_notify_peers = false;
3254 goto re_arm;
3255 }
3256
3257 bond_ab_arp_commit(bond);
3258
3259 rtnl_unlock();
3260 rcu_read_lock();
3261 }
3262
3263 should_notify_rtnl = bond_ab_arp_probe(bond);
3264 rcu_read_unlock();
3265
3266re_arm:
3267 if (bond->params.arp_interval)
3268 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3269
3270 if (should_notify_peers || should_notify_rtnl) {
3271 if (!rtnl_trylock())
3272 return;
3273
3274 if (should_notify_peers)
3275 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
3276 bond->dev);
3277 if (should_notify_rtnl) {
3278 bond_slave_state_notify(bond);
3279 bond_slave_link_notify(bond);
3280 }
3281
3282 rtnl_unlock();
3283 }
3284}
3285
3286static void bond_arp_monitor(struct work_struct *work)
3287{
3288 struct bonding *bond = container_of(work, struct bonding,
3289 arp_work.work);
3290
3291 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3292 bond_activebackup_arp_mon(bond);
3293 else
3294 bond_loadbalance_arp_mon(bond);
3295}
3296
3297/*-------------------------- netdev event handling --------------------------*/
3298
3299/* Change device name */
3300static int bond_event_changename(struct bonding *bond)
3301{
3302 bond_remove_proc_entry(bond);
3303 bond_create_proc_entry(bond);
3304
3305 bond_debug_reregister(bond);
3306
3307 return NOTIFY_DONE;
3308}
3309
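/* Handle netdev notifier events for the bond device itself: keep the
 * procfs entry and debugfs registration in step with register, unregister
 * and rename events.
 */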
3310static int bond_master_netdev_event(unsigned long event,
3311 struct net_device *bond_dev)
3312{
3313 struct bonding *event_bond = netdev_priv(bond_dev);
3314
3315 netdev_dbg(bond_dev, "%s called\n", __func__);
3316
3317 switch (event) {
3318 case NETDEV_CHANGENAME:
3319 return bond_event_changename(event_bond);
3320 case NETDEV_UNREGISTER:
3321 bond_remove_proc_entry(event_bond);
3322 break;
3323 case NETDEV_REGISTER:
3324 bond_create_proc_entry(event_bond);
3325 break;
3326 default:
3327 break;
3328 }
3329
3330 return NOTIFY_DONE;
3331}
3332
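/* Handle netdev notifier events for a slave device: release the slave on
 * unregister, refresh speed/duplex and the xmit slave array on link
 * changes, track primary-slave renames, and propagate feature and IGMP
 * events to the bond.
 */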
3333static int bond_slave_netdev_event(unsigned long event,
3334 struct net_device *slave_dev)
3335{
3336 struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3337 struct bonding *bond;
3338 struct net_device *bond_dev;
3339
3340 /* A netdev event can be generated while enslaving a device
3341 * before netdev_rx_handler_register is called, in which case
3342 * slave will be NULL
3343 */
3344 if (!slave) {
3345 netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3346 return NOTIFY_DONE;
3347 }
3348
3349 bond_dev = slave->bond->dev;
3350 bond = slave->bond;
3351 primary = rtnl_dereference(bond->primary_slave);
3352
3353 slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
3354
3355 switch (event) {
3356 case NETDEV_UNREGISTER:
3357 if (bond_dev->type != ARPHRD_ETHER)
3358 bond_release_and_destroy(bond_dev, slave_dev);
3359 else
3360 __bond_release_one(bond_dev, slave_dev, false, true);
3361 break;
3362 case NETDEV_UP:
3363 case NETDEV_CHANGE:
3364 /* For 802.3ad mode only:
3365 * Getting invalid Speed/Duplex values here will put slave
3366 * in weird state. Mark it as link-fail if the link was
3367 * previously up or link-down if it hasn't yet come up, and
3368 * let link-monitoring (miimon) set it right when correct
3369 * speeds/duplex are available.
3370 */
3371 if (bond_update_speed_duplex(slave) &&
3372 BOND_MODE(bond) == BOND_MODE_8023AD) {
3373 if (slave->last_link_up)
3374 slave->link = BOND_LINK_FAIL;
3375 else
3376 slave->link = BOND_LINK_DOWN;
3377 }
3378
3379 if (BOND_MODE(bond) == BOND_MODE_8023AD)
3380 bond_3ad_adapter_speed_duplex_changed(slave);
3381 fallthrough;
3382 case NETDEV_DOWN:
3383 /* Refresh slave-array if applicable!
3384 * If the setup does not use miimon or arpmon (mode-specific!),
3385 * then these events will not cause the slave-array to be
3386 * refreshed. This will cause xmit to use a slave that is not
3387 * usable. Avoid such a situation by refreshing the array at these
3388 * events. If these (miimon/arpmon) parameters are configured
3389 * then array gets refreshed twice and that should be fine!
3390 */
3391 if (bond_mode_can_use_xmit_hash(bond))
3392 bond_update_slave_arr(bond, NULL);
3393 break;
3394 case NETDEV_CHANGEMTU:
3395 /* TODO: Should slaves be allowed to
3396 * independently alter their MTU? For
3397 * an active-backup bond, slaves need
3398 * not be the same type of device, so
3399 * MTUs may vary. For other modes,
3400 * slaves arguably should have the
3401 * same MTUs. To do this, we'd need to
3402 * take over the slave's change_mtu
3403 * function for the duration of their
3404 * servitude.
3405 */
3406 break;
3407 case NETDEV_CHANGENAME:
3408 /* we don't care if we don't have primary set */
3409 if (!bond_uses_primary(bond) ||
3410 !bond->params.primary[0])
3411 break;
3412
3413 if (slave == primary) {
3414 /* slave's name changed - it's no longer the primary */
3415 RCU_INIT_POINTER(bond->primary_slave, NULL);
3416 } else if (!strcmp(slave_dev->name, bond->params.primary)) {
3417 /* we have a new primary slave */
3418 rcu_assign_pointer(bond->primary_slave, slave);
3419 } else { /* we didn't change primary - exit */
3420 break;
3421 }
3422
3423 netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3424 primary ? slave_dev->name : "none");
3425
3426 block_netpoll_tx();
3427 bond_select_active_slave(bond);
3428 unblock_netpoll_tx();
3429 break;
3430 case NETDEV_FEAT_CHANGE:
3431 bond_compute_features(bond);
3432 break;
3433 case NETDEV_RESEND_IGMP:
3434 /* Propagate to master device */
3435 call_netdevice_notifiers(event, slave->bond->dev);
3436 break;
3437 default:
3438 break;
3439 }
3440
3441 return NOTIFY_DONE;
3442}
3443
3444/* bond_netdev_event: handle netdev notifier chain events.
3445 *
3446 * This function receives events for the netdev chain. The caller (an
3447 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3448 * locks for us to safely manipulate the slave devices (RTNL lock,
3449 * dev_probe_lock).
3450 */
3451static int bond_netdev_event(struct notifier_block *this,
3452 unsigned long event, void *ptr)
3453{
3454 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3455
3456 netdev_dbg(event_dev, "%s received %s\n",
3457 __func__, netdev_cmd_to_name(event));
3458
3459 if (!(event_dev->priv_flags & IFF_BONDING))
3460 return NOTIFY_DONE;
3461
3462 if (event_dev->flags & IFF_MASTER) {
3463 int ret;
3464
3465 ret = bond_master_netdev_event(event, event_dev);
3466 if (ret != NOTIFY_DONE)
3467 return ret;
3468 }
3469
3470 if (event_dev->flags & IFF_SLAVE)
3471 return bond_slave_netdev_event(event, event_dev);
3472
3473 return NOTIFY_DONE;
3474}
3475
3476static struct notifier_block bond_netdev_notifier = {
3477 .notifier_call = bond_netdev_event,
3478};
3479
3480/*---------------------------- Hashing Policies -----------------------------*/
3481
3482/* L2 hash helper */
3483static inline u32 bond_eth_hash(struct sk_buff *skb)
3484{
3485 struct ethhdr *ep, hdr_tmp;
3486
3487 ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
3488 if (ep)
3489 return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
3490 return 0;
3491}
3492
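/* Pull the IPv4/IPv6 addresses out of the packet into the flow keys and
 * advance *noff past the network header; for layer3+4 policies also fetch
 * the L4 ports. Returns false if the packet is neither IPv4 nor IPv6 or is
 * too short.
 */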
3493static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk,
3494 int *noff, int *proto, bool l34)
3495{
3496 const struct ipv6hdr *iph6;
3497 const struct iphdr *iph;
3498
3499 if (skb->protocol == htons(ETH_P_IP)) {
3500 if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph))))
3501 return false;
3502 iph = (const struct iphdr *)(skb->data + *noff);
3503 iph_to_flow_copy_v4addrs(fk, iph);
3504 *noff += iph->ihl << 2;
3505 if (!ip_is_fragment(iph))
3506 *proto = iph->protocol;
3507 } else if (skb->protocol == htons(ETH_P_IPV6)) {
3508 if (unlikely(!pskb_may_pull(skb, *noff + sizeof(*iph6))))
3509 return false;
3510 iph6 = (const struct ipv6hdr *)(skb->data + *noff);
3511 iph_to_flow_copy_v6addrs(fk, iph6);
3512 *noff += sizeof(*iph6);
3513 *proto = iph6->nexthdr;
3514 } else {
3515 return false;
3516 }
3517
3518 if (l34 && *proto >= 0)
3519 fk->ports.ports = skb_flow_get_ports(skb, *noff, *proto);
3520
3521 return true;
3522}
3523
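/* vlan+srcmac hash policy: fold the source MAC's OUI (first three bytes)
 * and its device-specific part (last three bytes) into separate words,
 * then XOR them together with the VLAN tag when one is present. For
 * example, a source MAC of 00:11:22:33:44:55 yields the words 0x001122
 * and 0x334455.
 */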
3524static u32 bond_vlan_srcmac_hash(struct sk_buff *skb)
3525{
3526 struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
3527 u32 srcmac_vendor = 0, srcmac_dev = 0;
3528 u16 vlan;
3529 int i;
3530
3531 for (i = 0; i < 3; i++)
3532 srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
3533
3534 for (i = 3; i < ETH_ALEN; i++)
3535 srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
3536
3537 if (!skb_vlan_tag_present(skb))
3538 return srcmac_vendor ^ srcmac_dev;
3539
3540 vlan = skb_vlan_tag_get(skb);
3541
3542 return vlan ^ srcmac_vendor ^ srcmac_dev;
3543}
3544
3545/* Extract the appropriate headers based on bond's xmit policy */
3546static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb,
3547 struct flow_keys *fk)
3548{
3549 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
3550 int noff, proto = -1;
3551
3552 switch (bond->params.xmit_policy) {
3553 case BOND_XMIT_POLICY_ENCAP23:
3554 case BOND_XMIT_POLICY_ENCAP34:
3555 memset(fk, 0, sizeof(*fk));
3556 return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
3557 fk, NULL, 0, 0, 0, 0);
3558 default:
3559 break;
3560 }
3561
3562 fk->ports.ports = 0;
3563 memset(&fk->icmp, 0, sizeof(fk->icmp));
3564 noff = skb_network_offset(skb);
3565 if (!bond_flow_ip(skb, fk, &noff, &proto, l34))
3566 return false;
3567
3568 /* ICMP error packets contain at least 8 bytes of the header
3569 * of the packet which generated the error. Use this information
3570 * to correlate ICMP error packets within the same flow which
3571 * generated the error.
3572 */
3573 if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
3574 skb_flow_get_icmp_tci(skb, &fk->icmp, skb->data,
3575 skb_transport_offset(skb),
3576 skb_headlen(skb));
3577 if (proto == IPPROTO_ICMP) {
3578 if (!icmp_is_err(fk->icmp.type))
3579 return true;
3580
3581 noff += sizeof(struct icmphdr);
3582 } else if (proto == IPPROTO_ICMPV6) {
3583 if (!icmpv6_is_err(fk->icmp.type))
3584 return true;
3585
3586 noff += sizeof(struct icmp6hdr);
3587 }
3588 return bond_flow_ip(skb, fk, &noff, &proto, l34);
3589 }
3590
3591 return true;
3592}
3593
3594static u32 bond_ip_hash(u32 hash, struct flow_keys *flow)
3595{
3596 hash ^= (__force u32)flow_get_u32_dst(flow) ^
3597 (__force u32)flow_get_u32_src(flow);
3598 hash ^= (hash >> 16);
3599 hash ^= (hash >> 8);
3600 /* discard lowest hash bit to deal with the common even ports pattern */
3601 return hash >> 1;
3602}
3603
3604/**
3605 * bond_xmit_hash - generate a hash value based on the xmit policy
3606 * @bond: bonding device
3607 * @skb: buffer to use for headers
3608 *
3609 * This function will extract the necessary headers from the skb buffer and use
3610 * them to generate a hash based on the xmit_policy set in the bonding device
3611 */
3612u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
3613{
3614 struct flow_keys flow;
3615 u32 hash;
3616
3617 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
3618 skb->l4_hash)
3619 return skb->hash;
3620
3621 if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
3622 return bond_vlan_srcmac_hash(skb);
3623
3624 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
3625 !bond_flow_dissect(bond, skb, &flow))
3626 return bond_eth_hash(skb);
3627
3628 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
3629 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
3630 hash = bond_eth_hash(skb);
3631 } else {
3632 if (flow.icmp.id)
3633 memcpy(&hash, &flow.icmp, sizeof(hash));
3634 else
3635 memcpy(&hash, &flow.ports.ports, sizeof(hash));
3636 }
3637
3638 return bond_ip_hash(hash, &flow);
3639}
3640
3641/*-------------------------- Device entry points ----------------------------*/
3642
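/* Initialize the delayed work items for all of the bond's monitors
 * (IGMP resend, ALB, MII, ARP, 802.3ad state machine, slave array rebuild).
 */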
3643void bond_work_init_all(struct bonding *bond)
3644{
3645 INIT_DELAYED_WORK(&bond->mcast_work,
3646 bond_resend_igmp_join_requests_delayed);
3647 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
3648 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
3649 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
3650 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
3651 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
3652}
3653
3654static void bond_work_cancel_all(struct bonding *bond)
3655{
3656 cancel_delayed_work_sync(&bond->mii_work);
3657 cancel_delayed_work_sync(&bond->arp_work);
3658 cancel_delayed_work_sync(&bond->alb_work);
3659 cancel_delayed_work_sync(&bond->ad_work);
3660 cancel_delayed_work_sync(&bond->mcast_work);
3661 cancel_delayed_work_sync(&bond->slave_arr_work);
3662}
3663
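/* ndo_open: mark slaves active or inactive for the current mode, initialize
 * mode-specific state and start whichever monitors the configuration needs.
 */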
3664static int bond_open(struct net_device *bond_dev)
3665{
3666 struct bonding *bond = netdev_priv(bond_dev);
3667 struct list_head *iter;
3668 struct slave *slave;
3669
3670 /* reset slave->backup and slave->inactive */
3671 if (bond_has_slaves(bond)) {
3672 bond_for_each_slave(bond, slave, iter) {
3673 if (bond_uses_primary(bond) &&
3674 slave != rcu_access_pointer(bond->curr_active_slave)) {
3675 bond_set_slave_inactive_flags(slave,
3676 BOND_SLAVE_NOTIFY_NOW);
3677 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
3678 bond_set_slave_active_flags(slave,
3679 BOND_SLAVE_NOTIFY_NOW);
3680 }
3681 }
3682 }
3683
3684 if (bond_is_lb(bond)) {
3685 /* bond_alb_initialize must be called before the timer
3686 * is started.
3687 */
3688 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
3689 return -ENOMEM;
3690 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
3691 queue_delayed_work(bond->wq, &bond->alb_work, 0);
3692 }
3693
3694 if (bond->params.miimon) /* link check interval, in milliseconds. */
3695 queue_delayed_work(bond->wq, &bond->mii_work, 0);
3696
3697 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
3698 queue_delayed_work(bond->wq, &bond->arp_work, 0);
3699 bond->recv_probe = bond_arp_rcv;
3700 }
3701
3702 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
3703 queue_delayed_work(bond->wq, &bond->ad_work, 0);
3704 /* register to receive LACPDUs */
3705 bond->recv_probe = bond_3ad_lacpdu_recv;
3706 bond_3ad_initiate_agg_selection(bond, 1);
3707 }
3708
3709 if (bond_mode_can_use_xmit_hash(bond))
3710 bond_update_slave_arr(bond, NULL);
3711
3712 return 0;
3713}
3714
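/* ndo_stop: cancel all monitor work and tear down mode-specific state. */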
3715static int bond_close(struct net_device *bond_dev)
3716{
3717 struct bonding *bond = netdev_priv(bond_dev);
3718
3719 bond_work_cancel_all(bond);
3720 bond->send_peer_notif = 0;
3721 if (bond_is_lb(bond))
3722 bond_alb_deinitialize(bond);
3723 bond->recv_probe = NULL;
3724
3725 return 0;
3726}
3727
3728/* Fold stats, assuming all fields of rtnl_link_stats64 are u64, while
3729 * allowing for drivers that provide only 32bit values.
3730 */
3731static void bond_fold_stats(struct rtnl_link_stats64 *_res,
3732 const struct rtnl_link_stats64 *_new,
3733 const struct rtnl_link_stats64 *_old)
3734{
3735 const u64 *new = (const u64 *)_new;
3736 const u64 *old = (const u64 *)_old;
3737 u64 *res = (u64 *)_res;
3738 int i;
3739
3740 for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
3741 u64 nv = new[i];
3742 u64 ov = old[i];
3743 s64 delta = nv - ov;
3744
3745 /* detects if this particular field is 32bit only */
3746 if (((nv | ov) >> 32) == 0)
3747 delta = (s64)(s32)((u32)nv - (u32)ov);
3748
3749 /* filter anomalies, some drivers reset their stats
3750 * at down/up events.
3751 */
3752 if (delta > 0)
3753 res[i] += delta;
3754 }
3755}
3756
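/* Walk the lower-device graph under RCU and return the maximum nesting depth
 * below @dev; used as the lockdep subclass when taking the stats lock.
 */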
3757#ifdef CONFIG_LOCKDEP
3758static int bond_get_lowest_level_rcu(struct net_device *dev)
3759{
3760 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
3761 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
3762 int cur = 0, max = 0;
3763
3764 now = dev;
3765 iter = &dev->adj_list.lower;
3766
3767 while (1) {
3768 next = NULL;
3769 while (1) {
3770 ldev = netdev_next_lower_dev_rcu(now, &iter);
3771 if (!ldev)
3772 break;
3773
3774 next = ldev;
3775 niter = &ldev->adj_list.lower;
3776 dev_stack[cur] = now;
3777 iter_stack[cur++] = iter;
3778 if (max <= cur)
3779 max = cur;
3780 break;
3781 }
3782
3783 if (!next) {
3784 if (!cur)
3785 return max;
3786 next = dev_stack[--cur];
3787 niter = iter_stack[cur];
3788 }
3789
3790 now = next;
3791 iter = niter;
3792 }
3793
3794 return max;
3795}
3796#endif
3797
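/* ndo_get_stats64: add each slave's stats delta since the previous call on
 * top of the bond's accumulated counters, so the totals survive slaves
 * being released or added.
 */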
3798static void bond_get_stats(struct net_device *bond_dev,
3799 struct rtnl_link_stats64 *stats)
3800{
3801 struct bonding *bond = netdev_priv(bond_dev);
3802 struct rtnl_link_stats64 temp;
3803 struct list_head *iter;
3804 struct slave *slave;
3805 int nest_level = 0;
3806
3807
3808 rcu_read_lock();
3809#ifdef CONFIG_LOCKDEP
3810 nest_level = bond_get_lowest_level_rcu(bond_dev);
3811#endif
3812
3813 spin_lock_nested(&bond->stats_lock, nest_level);
3814 memcpy(stats, &bond->bond_stats, sizeof(*stats));
3815
3816 bond_for_each_slave_rcu(bond, slave, iter) {
3817 const struct rtnl_link_stats64 *new =
3818 dev_get_stats(slave->dev, &temp);
3819
3820 bond_fold_stats(stats, new, &slave->slave_stats);
3821
3822 /* save off the slave stats for the next run */
3823 memcpy(&slave->slave_stats, new, sizeof(*new));
3824 }
3825
3826 memcpy(&bond->bond_stats, stats, sizeof(*stats));
3827 spin_unlock(&bond->stats_lock);
3828 rcu_read_unlock();
3829}
3830
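/* ndo_do_ioctl: handle MII queries and the legacy bonding ioctls
 * (info/slave queries, enslave, release, set hwaddr, change active slave).
 */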
3831static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
3832{
3833 struct bonding *bond = netdev_priv(bond_dev);
3834 struct net_device *slave_dev = NULL;
3835 struct ifbond k_binfo;
3836 struct ifbond __user *u_binfo = NULL;
3837 struct ifslave k_sinfo;
3838 struct ifslave __user *u_sinfo = NULL;
3839 struct mii_ioctl_data *mii = NULL;
3840 struct bond_opt_value newval;
3841 struct net *net;
3842 int res = 0;
3843
3844 netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
3845
3846 switch (cmd) {
3847 case SIOCGMIIPHY:
3848 mii = if_mii(ifr);
3849 if (!mii)
3850 return -EINVAL;
3851
3852 mii->phy_id = 0;
3853 fallthrough;
3854 case SIOCGMIIREG:
3855 /* We do this again just in case we were called by SIOCGMIIREG
3856 * instead of SIOCGMIIPHY.
3857 */
3858 mii = if_mii(ifr);
3859 if (!mii)
3860 return -EINVAL;
3861
3862 if (mii->reg_num == 1) {
3863 mii->val_out = 0;
3864 if (netif_carrier_ok(bond->dev))
3865 mii->val_out = BMSR_LSTATUS;
3866 }
3867
3868 return 0;
3869 case BOND_INFO_QUERY_OLD:
3870 case SIOCBONDINFOQUERY:
3871 u_binfo = (struct ifbond __user *)ifr->ifr_data;
3872
3873 if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
3874 return -EFAULT;
3875
3876 bond_info_query(bond_dev, &k_binfo);
3877 if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
3878 return -EFAULT;
3879
3880 return 0;
3881 case BOND_SLAVE_INFO_QUERY_OLD:
3882 case SIOCBONDSLAVEINFOQUERY:
3883 u_sinfo = (struct ifslave __user *)ifr->ifr_data;
3884
3885 if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
3886 return -EFAULT;
3887
3888 res = bond_slave_info_query(bond_dev, &k_sinfo);
3889 if (res == 0 &&
3890 copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
3891 return -EFAULT;
3892
3893 return res;
3894 default:
3895 break;
3896 }
3897
3898 net = dev_net(bond_dev);
3899
3900 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3901 return -EPERM;
3902
3903 slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
3904
3905 slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
3906
3907 if (!slave_dev)
3908 return -ENODEV;
3909
3910 switch (cmd) {
3911 case BOND_ENSLAVE_OLD:
3912 case SIOCBONDENSLAVE:
3913 res = bond_enslave(bond_dev, slave_dev, NULL);
3914 break;
3915 case BOND_RELEASE_OLD:
3916 case SIOCBONDRELEASE:
3917 res = bond_release(bond_dev, slave_dev);
3918 break;
3919 case BOND_SETHWADDR_OLD:
3920 case SIOCBONDSETHWADDR:
3921 res = bond_set_dev_addr(bond_dev, slave_dev);
3922 break;
3923 case BOND_CHANGE_ACTIVE_OLD:
3924 case SIOCBONDCHANGEACTIVE:
3925 bond_opt_initstr(&newval, slave_dev->name);
3926 res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
3927 &newval);
3928 break;
3929 default:
3930 res = -EOPNOTSUPP;
3931 }
3932
3933 return res;
3934}
3935
3936static void bond_change_rx_flags(struct net_device *bond_dev, int change)
3937{
3938 struct bonding *bond = netdev_priv(bond_dev);
3939
3940 if (change & IFF_PROMISC)
3941 bond_set_promiscuity(bond,
3942 bond_dev->flags & IFF_PROMISC ? 1 : -1);
3943
3944 if (change & IFF_ALLMULTI)
3945 bond_set_allmulti(bond,
3946 bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
3947}
3948
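/* ndo_set_rx_mode: sync the bond's unicast and multicast address lists to
 * the current active slave in primary-based modes, or to every slave.
 */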
3949static void bond_set_rx_mode(struct net_device *bond_dev)
3950{
3951 struct bonding *bond = netdev_priv(bond_dev);
3952 struct list_head *iter;
3953 struct slave *slave;
3954
3955 rcu_read_lock();
3956 if (bond_uses_primary(bond)) {
3957 slave = rcu_dereference(bond->curr_active_slave);
3958 if (slave) {
3959 dev_uc_sync(slave->dev, bond_dev);
3960 dev_mc_sync(slave->dev, bond_dev);
3961 }
3962 } else {
3963 bond_for_each_slave_rcu(bond, slave, iter) {
3964 dev_uc_sync_multiple(slave->dev, bond_dev);
3965 dev_mc_sync_multiple(slave->dev, bond_dev);
3966 }
3967 }
3968 rcu_read_unlock();
3969}
3970
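/* Set up a neighbour entry by passing a zeroed neigh_parms to the first
 * slave's ndo_neigh_setup() and then calling the neigh_setup it provides.
 */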
3971static int bond_neigh_init(struct neighbour *n)
3972{
3973 struct bonding *bond = netdev_priv(n->dev);
3974 const struct net_device_ops *slave_ops;
3975 struct neigh_parms parms;
3976 struct slave *slave;
3977 int ret = 0;
3978
3979 rcu_read_lock();
3980 slave = bond_first_slave_rcu(bond);
3981 if (!slave)
3982 goto out;
3983 slave_ops = slave->dev->netdev_ops;
3984 if (!slave_ops->ndo_neigh_setup)
3985 goto out;
3986
3987 /* TODO: find another way [1] to implement this.
3988 * Passing a zeroed structure is fragile,
3989 * but at least we do not pass garbage.
3990 *
3991	 * [1] One way would be for ndo_neigh_setup() to never touch
3992	 *     struct neigh_parms, but to propagate the new neigh_setup()
3993	 *     back to ___neigh_create() / neigh_parms_alloc()
3994 */
3995 memset(&parms, 0, sizeof(parms));
3996 ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
3997
3998 if (ret)
3999 goto out;
4000
4001 if (parms.neigh_setup)
4002 ret = parms.neigh_setup(n);
4003out:
4004 rcu_read_unlock();
4005 return ret;
4006}
4007
4008/* The bonding ndo_neigh_setup is called at init time before any
4009 * slave exists. So we must declare a proxy setup function which will
4010 * be used at run time to resolve the actual slave neigh param setup.
4011 *
4012 * It's also called by master devices (such as vlans) to setup their
4013 * underlying devices. In that case - do nothing, we're already set up from
4014 * our init.
4015 */
4016static int bond_neigh_setup(struct net_device *dev,
4017 struct neigh_parms *parms)
4018{
4019 /* modify only our neigh_parms */
4020 if (parms->dev == dev)
4021 parms->neigh_setup = bond_neigh_init;
4022
4023 return 0;
4024}
4025
4026/* Change the MTU of all of a master's slaves to match the master */
4027static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4028{
4029 struct bonding *bond = netdev_priv(bond_dev);
4030 struct slave *slave, *rollback_slave;
4031 struct list_head *iter;
4032 int res = 0;
4033
4034 netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
4035
4036 bond_for_each_slave(bond, slave, iter) {
4037 slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
4038 slave, slave->dev->netdev_ops->ndo_change_mtu);
4039
4040 res = dev_set_mtu(slave->dev, new_mtu);
4041
4042 if (res) {
4043 /* If we failed to set the slave's mtu to the new value
4044 * we must abort the operation even in ACTIVE_BACKUP
4045 * mode, because if we allow the backup slaves to have
4046 * different mtu values than the active slave we'll
4047 * need to change their mtu when doing a failover. That
4048 * means changing their mtu from timer context, which
4049 * is probably not a good idea.
4050 */
4051 slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
4052 res, new_mtu);
4053 goto unwind;
4054 }
4055 }
4056
4057 bond_dev->mtu = new_mtu;
4058
4059 return 0;
4060
4061unwind:
4062 /* unwind from head to the slave that failed */
4063 bond_for_each_slave(bond, rollback_slave, iter) {
4064 int tmp_res;
4065
4066 if (rollback_slave == slave)
4067 break;
4068
4069 tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
4070 if (tmp_res)
4071 slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
4072 tmp_res);
4073 }
4074
4075 return res;
4076}
4077
4078/* Change HW address
4079 *
4080 * Note that many devices must be down to change the HW address, and
4081 * downing the master releases all slaves. We can make bonds full of
4082 * bonding devices to test this, however.
4083 */
4084static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4085{
4086 struct bonding *bond = netdev_priv(bond_dev);
4087 struct slave *slave, *rollback_slave;
4088 struct sockaddr_storage *ss = addr, tmp_ss;
4089 struct list_head *iter;
4090 int res = 0;
4091
4092 if (BOND_MODE(bond) == BOND_MODE_ALB)
4093 return bond_alb_set_mac_address(bond_dev, addr);
4094
4095
4096 netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
4097
4098 /* If fail_over_mac is enabled, do nothing and return success.
4099 * Returning an error causes ifenslave to fail.
4100 */
4101 if (bond->params.fail_over_mac &&
4102 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
4103 return 0;
4104
4105 if (!is_valid_ether_addr(ss->__data))
4106 return -EADDRNOTAVAIL;
4107
4108 bond_for_each_slave(bond, slave, iter) {
4109 slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
4110 __func__, slave);
4111 res = dev_set_mac_address(slave->dev, addr, NULL);
4112 if (res) {
4113			/* TODO: consider downing the slave
4114			 * and retrying?
4115			 * The user should expect communications
4116			 * breakage anyway until ARP finishes
4117			 * updating, so...
4118			 */
4119 slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
4120 __func__, res);
4121 goto unwind;
4122 }
4123 }
4124
4125 /* success */
4126 memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
4127 return 0;
4128
4129unwind:
4130 memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
4131 tmp_ss.ss_family = bond_dev->type;
4132
4133 /* unwind from head to the slave that failed */
4134 bond_for_each_slave(bond, rollback_slave, iter) {
4135 int tmp_res;
4136
4137 if (rollback_slave == slave)
4138 break;
4139
4140 tmp_res = dev_set_mac_address(rollback_slave->dev,
4141 (struct sockaddr *)&tmp_ss, NULL);
4142 if (tmp_res) {
4143 slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
4144 __func__, tmp_res);
4145 }
4146 }
4147
4148 return res;
4149}
4150
4151/**
4152 * bond_get_slave_by_id - get xmit slave with slave_id
4153 * @bond: bonding device that is transmitting
4154 * @slave_id: slave id up to slave_cnt-1 through which to transmit
4155 *
4156 * This function tries to get the slave with slave_id, but in case
4157 * it fails, it tries to find the first available slave for transmission.
4158 */
4159static struct slave *bond_get_slave_by_id(struct bonding *bond,
4160 int slave_id)
4161{
4162 struct list_head *iter;
4163 struct slave *slave;
4164 int i = slave_id;
4165
4166 /* Here we start from the slave with slave_id */
4167 bond_for_each_slave_rcu(bond, slave, iter) {
4168 if (--i < 0) {
4169 if (bond_slave_can_tx(slave))
4170 return slave;
4171 }
4172 }
4173
4174 /* Here we start from the first slave up to slave_id */
4175 i = slave_id;
4176 bond_for_each_slave_rcu(bond, slave, iter) {
4177 if (--i < 0)
4178 break;
4179 if (bond_slave_can_tx(slave))
4180 return slave;
4181 }
4182 /* no slave that can tx has been found */
4183 return NULL;
4184}
4185
4186/**
4187 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
4188 * @bond: bonding device to use
4189 *
4190 * Based on the value of the bonding device's packets_per_slave parameter
4191 * this function generates a slave id, which is usually used as the next
4192 * slave to transmit through.
4193 */
4194static u32 bond_rr_gen_slave_id(struct bonding *bond)
4195{
4196 u32 slave_id;
4197 struct reciprocal_value reciprocal_packets_per_slave;
4198 int packets_per_slave = bond->params.packets_per_slave;
4199
4200 switch (packets_per_slave) {
4201 case 0:
4202 slave_id = prandom_u32();
4203 break;
4204 case 1:
4205 slave_id = bond->rr_tx_counter;
4206 break;
4207 default:
4208 reciprocal_packets_per_slave =
4209 bond->params.reciprocal_packets_per_slave;
4210 slave_id = reciprocal_divide(bond->rr_tx_counter,
4211 reciprocal_packets_per_slave);
4212 break;
4213 }
4214 bond->rr_tx_counter++;
4215
4216 return slave_id;
4217}
4218
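/* Pick the slave for a round-robin transmit.  IGMP traffic is pinned to
 * curr_active_slave so membership reports stay on one interface; all other
 * traffic rotates through the slaves according to packets_per_slave.
 */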
4219static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
4220 struct sk_buff *skb)
4221{
4222 struct slave *slave;
4223 int slave_cnt;
4224 u32 slave_id;
4225
4226 /* Start with the curr_active_slave that joined the bond as the
4227 * default for sending IGMP traffic. For failover purposes one
4228 * needs to maintain some consistency for the interface that will
4229 * send the join/membership reports. The curr_active_slave found
4230 * will send all of this type of traffic.
4231 */
4232 if (skb->protocol == htons(ETH_P_IP)) {
4233 int noff = skb_network_offset(skb);
4234 struct iphdr *iph;
4235
4236 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
4237 goto non_igmp;
4238
4239 iph = ip_hdr(skb);
4240 if (iph->protocol == IPPROTO_IGMP) {
4241 slave = rcu_dereference(bond->curr_active_slave);
4242 if (slave)
4243 return slave;
4244 return bond_get_slave_by_id(bond, 0);
4245 }
4246 }
4247
4248non_igmp:
4249 slave_cnt = READ_ONCE(bond->slave_cnt);
4250 if (likely(slave_cnt)) {
4251 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4252 return bond_get_slave_by_id(bond, slave_id);
4253 }
4254 return NULL;
4255}
4256
4257static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
4258 struct net_device *bond_dev)
4259{
4260 struct bonding *bond = netdev_priv(bond_dev);
4261 struct slave *slave;
4262
4263 slave = bond_xmit_roundrobin_slave_get(bond, skb);
4264 if (likely(slave))
4265 return bond_dev_queue_xmit(bond, skb, slave->dev);
4266
4267 return bond_tx_drop(bond_dev, skb);
4268}
4269
4270static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond,
4271 struct sk_buff *skb)
4272{
4273 return rcu_dereference(bond->curr_active_slave);
4274}
4275
4276/* In active-backup mode, we know that bond->curr_active_slave is always valid if
4277 * the bond has a usable interface.
4278 */
4279static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
4280 struct net_device *bond_dev)
4281{
4282 struct bonding *bond = netdev_priv(bond_dev);
4283 struct slave *slave;
4284
4285 slave = bond_xmit_activebackup_slave_get(bond, skb);
4286 if (slave)
4287 return bond_dev_queue_xmit(bond, skb, slave->dev);
4288
4289 return bond_tx_drop(bond_dev, skb);
4290}
4291
4292/* Use this to update the slave array when (a) it's not appropriate to update
4293 * it right away (note that bond_update_slave_arr() may sleep)
4294 * and/or (b) RTNL is not held.
4295 */
4296void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
4297{
4298 queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
4299}
4300
4301/* Slave array work handler. Holds only RTNL */
4302static void bond_slave_arr_handler(struct work_struct *work)
4303{
4304 struct bonding *bond = container_of(work, struct bonding,
4305 slave_arr_work.work);
4306 int ret;
4307
4308 if (!rtnl_trylock())
4309 goto err;
4310
4311 ret = bond_update_slave_arr(bond, NULL);
4312 rtnl_unlock();
4313 if (ret) {
4314 pr_warn_ratelimited("Failed to update slave array from WT\n");
4315 goto err;
4316 }
4317 return;
4318
4319err:
4320 bond_slave_arr_work_rearm(bond, 1);
4321}
4322
4323static void bond_skip_slave(struct bond_up_slave *slaves,
4324 struct slave *skipslave)
4325{
4326 int idx;
4327
4328	/* Rare situation where the caller has asked to skip a specific
4329	 * slave but array allocation failed (the most likely cause). This is
4330	 * only possible when the call is initiated from
4331	 * __bond_release_one(). In this situation, overwrite the
4332	 * skipslave entry in the array with the last entry from the
4333	 * array so that the xmit path cannot choose
4334	 * this to-be-skipped slave to send a packet out.
4335	 */
4336 for (idx = 0; slaves && idx < slaves->count; idx++) {
4337 if (skipslave == slaves->arr[idx]) {
4338 slaves->arr[idx] =
4339 slaves->arr[slaves->count - 1];
4340 slaves->count--;
4341 break;
4342 }
4343 }
4344}
4345
4346static void bond_set_slave_arr(struct bonding *bond,
4347 struct bond_up_slave *usable_slaves,
4348 struct bond_up_slave *all_slaves)
4349{
4350 struct bond_up_slave *usable, *all;
4351
4352 usable = rtnl_dereference(bond->usable_slaves);
4353 rcu_assign_pointer(bond->usable_slaves, usable_slaves);
4354 kfree_rcu(usable, rcu);
4355
4356 all = rtnl_dereference(bond->all_slaves);
4357 rcu_assign_pointer(bond->all_slaves, all_slaves);
4358 kfree_rcu(all, rcu);
4359}
4360
4361static void bond_reset_slave_arr(struct bonding *bond)
4362{
4363 struct bond_up_slave *usable, *all;
4364
4365 usable = rtnl_dereference(bond->usable_slaves);
4366 if (usable) {
4367 RCU_INIT_POINTER(bond->usable_slaves, NULL);
4368 kfree_rcu(usable, rcu);
4369 }
4370
4371 all = rtnl_dereference(bond->all_slaves);
4372 if (all) {
4373 RCU_INIT_POINTER(bond->all_slaves, NULL);
4374 kfree_rcu(all, rcu);
4375 }
4376}
4377
4378/* Build the usable slaves array in control path for modes that use xmit-hash
4379 * to determine the slave interface -
4380 * (a) BOND_MODE_8023AD
4381 * (b) BOND_MODE_XOR
4382 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
4383 *
4384 * The caller is expected to hold RTNL only and NO other lock!
4385 */
4386int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
4387{
4388 struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
4389 struct slave *slave;
4390 struct list_head *iter;
4391 int agg_id = 0;
4392 int ret = 0;
4393
4394 might_sleep();
4395
4396 usable_slaves = kzalloc(struct_size(usable_slaves, arr,
4397 bond->slave_cnt), GFP_KERNEL);
4398 all_slaves = kzalloc(struct_size(all_slaves, arr,
4399 bond->slave_cnt), GFP_KERNEL);
4400 if (!usable_slaves || !all_slaves) {
4401 ret = -ENOMEM;
4402 goto out;
4403 }
4404 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4405 struct ad_info ad_info;
4406
4407 spin_lock_bh(&bond->mode_lock);
4408 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
4409 spin_unlock_bh(&bond->mode_lock);
4410 pr_debug("bond_3ad_get_active_agg_info failed\n");
4411			/* No active aggregator means it's not safe to use
4412 * the previous array.
4413 */
4414 bond_reset_slave_arr(bond);
4415 goto out;
4416 }
4417 spin_unlock_bh(&bond->mode_lock);
4418 agg_id = ad_info.aggregator_id;
4419 }
4420 bond_for_each_slave(bond, slave, iter) {
4421 if (skipslave == slave)
4422 continue;
4423
4424 all_slaves->arr[all_slaves->count++] = slave;
4425 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4426 struct aggregator *agg;
4427
4428 agg = SLAVE_AD_INFO(slave)->port.aggregator;
4429 if (!agg || agg->aggregator_identifier != agg_id)
4430 continue;
4431 }
4432 if (!bond_slave_can_tx(slave))
4433 continue;
4434
4435 slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
4436 usable_slaves->count);
4437
4438 usable_slaves->arr[usable_slaves->count++] = slave;
4439 }
4440
4441 bond_set_slave_arr(bond, usable_slaves, all_slaves);
4442 return ret;
4443out:
4444 if (ret != 0 && skipslave) {
4445 bond_skip_slave(rtnl_dereference(bond->all_slaves),
4446 skipslave);
4447 bond_skip_slave(rtnl_dereference(bond->usable_slaves),
4448 skipslave);
4449 }
4450 kfree_rcu(all_slaves, rcu);
4451 kfree_rcu(usable_slaves, rcu);
4452
4453 return ret;
4454}
4455
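/* Pick the transmit slave for 802.3ad/XOR modes: compute the xmit hash and
 * index into the pre-built @slaves array.
 */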
4456static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
4457 struct sk_buff *skb,
4458 struct bond_up_slave *slaves)
4459{
4460 struct slave *slave;
4461 unsigned int count;
4462 u32 hash;
4463
4464 hash = bond_xmit_hash(bond, skb);
4465 count = slaves ? READ_ONCE(slaves->count) : 0;
4466 if (unlikely(!count))
4467 return NULL;
4468
4469 slave = slaves->arr[hash % count];
4470 return slave;
4471}
4472
4473/* Use this Xmit function for 3AD as well as XOR modes. The current
4474 * usable slave array is formed in the control path. The xmit function
4475 * just calculates hash and sends the packet out.
4476 */
4477static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
4478 struct net_device *dev)
4479{
4480 struct bonding *bond = netdev_priv(dev);
4481 struct bond_up_slave *slaves;
4482 struct slave *slave;
4483
4484 slaves = rcu_dereference(bond->usable_slaves);
4485 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
4486 if (likely(slave))
4487 return bond_dev_queue_xmit(bond, skb, slave->dev);
4488
4489 return bond_tx_drop(dev, skb);
4490}
4491
4492/* in broadcast mode, we send everything to all usable interfaces. */
4493static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
4494 struct net_device *bond_dev)
4495{
4496 struct bonding *bond = netdev_priv(bond_dev);
4497 struct slave *slave = NULL;
4498 struct list_head *iter;
4499
4500 bond_for_each_slave_rcu(bond, slave, iter) {
4501 if (bond_is_last_slave(bond, slave))
4502 break;
4503 if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
4504 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
4505
4506 if (!skb2) {
4507 net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
4508 bond_dev->name, __func__);
4509 continue;
4510 }
4511 bond_dev_queue_xmit(bond, skb2, slave->dev);
4512 }
4513 }
4514 if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
4515 return bond_dev_queue_xmit(bond, skb, slave->dev);
4516
4517 return bond_tx_drop(bond_dev, skb);
4518}
4519
4520/*------------------------- Device initialization ---------------------------*/
4521
4522/* Lookup the slave that corresponds to a qid */
4523static inline int bond_slave_override(struct bonding *bond,
4524 struct sk_buff *skb)
4525{
4526 struct slave *slave = NULL;
4527 struct list_head *iter;
4528
4529 if (!skb_rx_queue_recorded(skb))
4530 return 1;
4531
4532 /* Find out if any slaves have the same mapping as this skb. */
4533 bond_for_each_slave_rcu(bond, slave, iter) {
4534 if (slave->queue_id == skb_get_queue_mapping(skb)) {
4535 if (bond_slave_is_up(slave) &&
4536 slave->link == BOND_LINK_UP) {
4537 bond_dev_queue_xmit(bond, skb, slave->dev);
4538 return 0;
4539 }
4540 /* If the slave isn't UP, use default transmit policy. */
4541 break;
4542 }
4543 }
4544
4545 return 1;
4546}
4547
4548
4549static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
4550 struct net_device *sb_dev)
4551{
4552 /* This helper function exists to help dev_pick_tx get the correct
4553 * destination queue. Using a helper function skips a call to
4554 * skb_tx_hash and will put the skbs in the queue we expect on their
4555 * way down to the bonding driver.
4556 */
4557 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
4558
4559 /* Save the original txq to restore before passing to the driver */
4560 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
4561
4562 if (unlikely(txq >= dev->real_num_tx_queues)) {
4563 do {
4564 txq -= dev->real_num_tx_queues;
4565 } while (txq >= dev->real_num_tx_queues);
4566 }
4567 return txq;
4568}
4569
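/* ndo_get_xmit_slave: return the slave device the current mode would choose
 * for @skb, without transmitting anything.
 */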
4570static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
4571 struct sk_buff *skb,
4572 bool all_slaves)
4573{
4574 struct bonding *bond = netdev_priv(master_dev);
4575 struct bond_up_slave *slaves;
4576 struct slave *slave = NULL;
4577
4578 switch (BOND_MODE(bond)) {
4579 case BOND_MODE_ROUNDROBIN:
4580 slave = bond_xmit_roundrobin_slave_get(bond, skb);
4581 break;
4582 case BOND_MODE_ACTIVEBACKUP:
4583 slave = bond_xmit_activebackup_slave_get(bond, skb);
4584 break;
4585 case BOND_MODE_8023AD:
4586 case BOND_MODE_XOR:
4587 if (all_slaves)
4588 slaves = rcu_dereference(bond->all_slaves);
4589 else
4590 slaves = rcu_dereference(bond->usable_slaves);
4591 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
4592 break;
4593 case BOND_MODE_BROADCAST:
4594 break;
4595 case BOND_MODE_ALB:
4596 slave = bond_xmit_alb_slave_get(bond, skb);
4597 break;
4598 case BOND_MODE_TLB:
4599 slave = bond_xmit_tlb_slave_get(bond, skb);
4600 break;
4601 default:
4602 /* Should never happen, mode already checked */
4603 WARN_ONCE(true, "Unknown bonding mode");
4604 break;
4605 }
4606
4607 if (slave)
4608 return slave->dev;
4609 return NULL;
4610}
4611
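/* Fill @flow with the L3 addresses and L4 ports of a connected socket so it
 * can be hashed with the same logic as an skb-based flow.
 */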
4612static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
4613{
4614 switch (sk->sk_family) {
4615#if IS_ENABLED(CONFIG_IPV6)
4616 case AF_INET6:
4617 if (sk->sk_ipv6only ||
4618 ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
4619 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
4620 flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
4621 flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
4622 break;
4623 }
4624 fallthrough;
4625#endif
4626 default: /* AF_INET */
4627 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
4628 flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
4629 flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
4630 break;
4631 }
4632
4633 flow->ports.src = inet_sk(sk)->inet_sport;
4634 flow->ports.dst = inet_sk(sk)->inet_dport;
4635}
4636
4637/**
4638 * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
4639 * @sk: socket to use for headers
4640 *
4641 * This function will extract the necessary fields from the socket and use
4642 * them to generate a hash based on the LAYER34 xmit_policy.
4643 * Assumes that sk is a TCP or UDP socket.
4644 */
4645static u32 bond_sk_hash_l34(struct sock *sk)
4646{
4647 struct flow_keys flow;
4648 u32 hash;
4649
4650 bond_sk_to_flow(sk, &flow);
4651
4652 /* L4 */
4653 memcpy(&hash, &flow.ports.ports, sizeof(hash));
4654 /* L3 */
4655 return bond_ip_hash(hash, &flow);
4656}
4657
4658static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
4659 struct sock *sk)
4660{
4661 struct bond_up_slave *slaves;
4662 struct slave *slave;
4663 unsigned int count;
4664 u32 hash;
4665
4666 slaves = rcu_dereference(bond->usable_slaves);
4667 count = slaves ? READ_ONCE(slaves->count) : 0;
4668 if (unlikely(!count))
4669 return NULL;
4670
4671 hash = bond_sk_hash_l34(sk);
4672 slave = slaves->arr[hash % count];
4673
4674 return slave->dev;
4675}
4676
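/* ndo_sk_get_lower_dev: map a connected socket to the slave its L3/L4 hash
 * selects, when bond_sk_check() allows it.
 */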
4677static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
4678 struct sock *sk)
4679{
4680 struct bonding *bond = netdev_priv(dev);
4681 struct net_device *lower = NULL;
4682
4683 rcu_read_lock();
4684 if (bond_sk_check(bond))
4685 lower = __bond_sk_get_lower_dev(bond, sk);
4686 rcu_read_unlock();
4687
4688 return lower;
4689}
4690
4691#if IS_ENABLED(CONFIG_TLS_DEVICE)
4692static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
4693 struct net_device *dev)
4694{
4695 if (likely(bond_get_slave_by_dev(bond, tls_get_ctx(skb->sk)->netdev)))
4696 return bond_dev_queue_xmit(bond, skb, tls_get_ctx(skb->sk)->netdev);
4697 return bond_tx_drop(dev, skb);
4698}
4699#endif
4700
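/* Dispatch a frame to the mode-specific transmit routine, after honouring a
 * recorded queue-id override and TLS device offload; runs under RCU from
 * bond_start_xmit().
 */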
4701static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4702{
4703 struct bonding *bond = netdev_priv(dev);
4704
4705 if (bond_should_override_tx_queue(bond) &&
4706 !bond_slave_override(bond, skb))
4707 return NETDEV_TX_OK;
4708
4709#if IS_ENABLED(CONFIG_TLS_DEVICE)
4710 if (skb->sk && tls_is_sk_tx_device_offloaded(skb->sk))
4711 return bond_tls_device_xmit(bond, skb, dev);
4712#endif
4713
4714 switch (BOND_MODE(bond)) {
4715 case BOND_MODE_ROUNDROBIN:
4716 return bond_xmit_roundrobin(skb, dev);
4717 case BOND_MODE_ACTIVEBACKUP:
4718 return bond_xmit_activebackup(skb, dev);
4719 case BOND_MODE_8023AD:
4720 case BOND_MODE_XOR:
4721 return bond_3ad_xor_xmit(skb, dev);
4722 case BOND_MODE_BROADCAST:
4723 return bond_xmit_broadcast(skb, dev);
4724 case BOND_MODE_ALB:
4725 return bond_alb_xmit(skb, dev);
4726 case BOND_MODE_TLB:
4727 return bond_tlb_xmit(skb, dev);
4728 default:
4729 /* Should never happen, mode already checked */
4730 netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
4731 WARN_ON_ONCE(1);
4732 return bond_tx_drop(dev, skb);
4733 }
4734}
4735
4736static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
4737{
4738 struct bonding *bond = netdev_priv(dev);
4739 netdev_tx_t ret = NETDEV_TX_OK;
4740
4741 /* If we risk deadlock from transmitting this in the
4742 * netpoll path, tell netpoll to queue the frame for later tx
4743 */
4744 if (unlikely(is_netpoll_tx_blocked(dev)))
4745 return NETDEV_TX_BUSY;
4746
4747 rcu_read_lock();
4748 if (bond_has_slaves(bond))
4749 ret = __bond_start_xmit(skb, dev);
4750 else
4751 ret = bond_tx_drop(dev, skb);
4752 rcu_read_unlock();
4753
4754 return ret;
4755}
4756
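/* In broadcast mode report the slowest slave's speed, since every frame is
 * transmitted on all slaves.
 */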
4757static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
4758{
4759 if (speed == 0 || speed == SPEED_UNKNOWN)
4760 speed = slave->speed;
4761 else
4762 speed = min(speed, slave->speed);
4763
4764 return speed;
4765}
4766
4767static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
4768 struct ethtool_link_ksettings *cmd)
4769{
4770 struct bonding *bond = netdev_priv(bond_dev);
4771 struct list_head *iter;
4772 struct slave *slave;
4773 u32 speed = 0;
4774
4775 cmd->base.duplex = DUPLEX_UNKNOWN;
4776 cmd->base.port = PORT_OTHER;
4777
4778 /* Since bond_slave_can_tx returns false for all inactive or down slaves, we
4779 * do not need to check mode. Though link speed might not represent
4780	 * the true receive or transmit bandwidth (not all modes are symmetric),
4781	 * this is an accurate maximum.
4782 */
4783 bond_for_each_slave(bond, slave, iter) {
4784 if (bond_slave_can_tx(slave)) {
4785 if (slave->speed != SPEED_UNKNOWN) {
4786 if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
4787 speed = bond_mode_bcast_speed(slave,
4788 speed);
4789 else
4790 speed += slave->speed;
4791 }
4792 if (cmd->base.duplex == DUPLEX_UNKNOWN &&
4793 slave->duplex != DUPLEX_UNKNOWN)
4794 cmd->base.duplex = slave->duplex;
4795 }
4796 }
4797 cmd->base.speed = speed ? : SPEED_UNKNOWN;
4798
4799 return 0;
4800}
4801
4802static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
4803 struct ethtool_drvinfo *drvinfo)
4804{
4805 strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
4806 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
4807 BOND_ABI_VERSION);
4808}
4809
4810static const struct ethtool_ops bond_ethtool_ops = {
4811 .get_drvinfo = bond_ethtool_get_drvinfo,
4812 .get_link = ethtool_op_get_link,
4813 .get_link_ksettings = bond_ethtool_get_link_ksettings,
4814};
4815
4816static const struct net_device_ops bond_netdev_ops = {
4817 .ndo_init = bond_init,
4818 .ndo_uninit = bond_uninit,
4819 .ndo_open = bond_open,
4820 .ndo_stop = bond_close,
4821 .ndo_start_xmit = bond_start_xmit,
4822 .ndo_select_queue = bond_select_queue,
4823 .ndo_get_stats64 = bond_get_stats,
4824 .ndo_do_ioctl = bond_do_ioctl,
4825 .ndo_change_rx_flags = bond_change_rx_flags,
4826 .ndo_set_rx_mode = bond_set_rx_mode,
4827 .ndo_change_mtu = bond_change_mtu,
4828 .ndo_set_mac_address = bond_set_mac_address,
4829 .ndo_neigh_setup = bond_neigh_setup,
4830 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4831 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4832#ifdef CONFIG_NET_POLL_CONTROLLER
4833 .ndo_netpoll_setup = bond_netpoll_setup,
4834 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
4835 .ndo_poll_controller = bond_poll_controller,
4836#endif
4837 .ndo_add_slave = bond_enslave,
4838 .ndo_del_slave = bond_release,
4839 .ndo_fix_features = bond_fix_features,
4840 .ndo_features_check = passthru_features_check,
4841 .ndo_get_xmit_slave = bond_xmit_get_slave,
4842 .ndo_sk_get_lower_dev = bond_sk_get_lower_dev,
4843};
4844
4845static const struct device_type bond_type = {
4846 .name = "bond",
4847};
4848
4849static void bond_destructor(struct net_device *bond_dev)
4850{
4851 struct bonding *bond = netdev_priv(bond_dev);
4852 if (bond->wq)
4853 destroy_workqueue(bond->wq);
4854}
4855
4856void bond_setup(struct net_device *bond_dev)
4857{
4858 struct bonding *bond = netdev_priv(bond_dev);
4859
4860 spin_lock_init(&bond->mode_lock);
4861 bond->params = bonding_defaults;
4862
4863 /* Initialize pointers */
4864 bond->dev = bond_dev;
4865
4866 /* Initialize the device entry points */
4867 ether_setup(bond_dev);
4868 bond_dev->max_mtu = ETH_MAX_MTU;
4869 bond_dev->netdev_ops = &bond_netdev_ops;
4870 bond_dev->ethtool_ops = &bond_ethtool_ops;
4871
4872 bond_dev->needs_free_netdev = true;
4873 bond_dev->priv_destructor = bond_destructor;
4874
4875 SET_NETDEV_DEVTYPE(bond_dev, &bond_type);
4876
4877 /* Initialize the device options */
4878 bond_dev->flags |= IFF_MASTER;
4879 bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
4880 bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
4881
4882#ifdef CONFIG_XFRM_OFFLOAD
4883 /* set up xfrm device ops (only supported in active-backup right now) */
4884 bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
4885 bond->xs = NULL;
4886#endif /* CONFIG_XFRM_OFFLOAD */
4887
4888 /* don't acquire bond device's netif_tx_lock when transmitting */
4889 bond_dev->features |= NETIF_F_LLTX;
4890
4891	/* By default, we declare the bond to be capable of full
4892	 * VLAN hardware acceleration. Special
4893	 * care is taken in the various xmit functions
4894	 * when there are slaves that are not hw accel
4895	 * capable.
4896	 */
4897
4898 /* Don't allow bond devices to change network namespaces. */
4899 bond_dev->features |= NETIF_F_NETNS_LOCAL;
4900
4901 bond_dev->hw_features = BOND_VLAN_FEATURES |
4902 NETIF_F_HW_VLAN_CTAG_RX |
4903 NETIF_F_HW_VLAN_CTAG_FILTER;
4904
4905 bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
4906 bond_dev->features |= bond_dev->hw_features;
4907 bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
4908#ifdef CONFIG_XFRM_OFFLOAD
4909 bond_dev->hw_features |= BOND_XFRM_FEATURES;
4910 /* Only enable XFRM features if this is an active-backup config */
4911 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
4912 bond_dev->features |= BOND_XFRM_FEATURES;
4913#endif /* CONFIG_XFRM_OFFLOAD */
4914#if IS_ENABLED(CONFIG_TLS_DEVICE)
4915 if (bond_sk_check(bond))
4916 bond_dev->features |= BOND_TLS_FEATURES;
4917#endif
4918}
4919
4920/* Destroy a bonding device.
4921 * Must be under rtnl_lock when this function is called.
4922 */
4923static void bond_uninit(struct net_device *bond_dev)
4924{
4925 struct bonding *bond = netdev_priv(bond_dev);
4926 struct bond_up_slave *usable, *all;
4927 struct list_head *iter;
4928 struct slave *slave;
4929
4930 bond_netpoll_cleanup(bond_dev);
4931
4932 /* Release the bonded slaves */
4933 bond_for_each_slave(bond, slave, iter)
4934 __bond_release_one(bond_dev, slave->dev, true, true);
4935 netdev_info(bond_dev, "Released all slaves\n");
4936
4937 usable = rtnl_dereference(bond->usable_slaves);
4938 if (usable) {
4939 RCU_INIT_POINTER(bond->usable_slaves, NULL);
4940 kfree_rcu(usable, rcu);
4941 }
4942
4943 all = rtnl_dereference(bond->all_slaves);
4944 if (all) {
4945 RCU_INIT_POINTER(bond->all_slaves, NULL);
4946 kfree_rcu(all, rcu);
4947 }
4948
4949 list_del(&bond->bond_list);
4950
4951 bond_debug_unregister(bond);
4952}
4953
4954/*------------------------- Module initialization ---------------------------*/
4955
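/* Validate the module parameters and convert them into the bond_params
 * defaults used for every newly created bond.
 */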
4956static int bond_check_params(struct bond_params *params)
4957{
4958 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4959 struct bond_opt_value newval;
4960 const struct bond_opt_value *valptr;
4961 int arp_all_targets_value = 0;
4962 u16 ad_actor_sys_prio = 0;
4963 u16 ad_user_port_key = 0;
4964 __be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
4965 int arp_ip_count;
4966 int bond_mode = BOND_MODE_ROUNDROBIN;
4967 int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
4968 int lacp_fast = 0;
4969 int tlb_dynamic_lb;
4970
4971 /* Convert string parameters. */
4972 if (mode) {
4973 bond_opt_initstr(&newval, mode);
4974 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
4975 if (!valptr) {
4976 pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
4977 return -EINVAL;
4978 }
4979 bond_mode = valptr->value;
4980 }
4981
4982 if (xmit_hash_policy) {
4983 if (bond_mode == BOND_MODE_ROUNDROBIN ||
4984 bond_mode == BOND_MODE_ACTIVEBACKUP ||
4985 bond_mode == BOND_MODE_BROADCAST) {
4986 pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
4987 bond_mode_name(bond_mode));
4988 } else {
4989 bond_opt_initstr(&newval, xmit_hash_policy);
4990 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
4991 &newval);
4992 if (!valptr) {
4993 pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
4994 xmit_hash_policy);
4995 return -EINVAL;
4996 }
4997 xmit_hashtype = valptr->value;
4998 }
4999 }
5000
5001 if (lacp_rate) {
5002 if (bond_mode != BOND_MODE_8023AD) {
5003 pr_info("lacp_rate param is irrelevant in mode %s\n",
5004 bond_mode_name(bond_mode));
5005 } else {
5006 bond_opt_initstr(&newval, lacp_rate);
5007 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
5008 &newval);
5009 if (!valptr) {
5010 pr_err("Error: Invalid lacp rate \"%s\"\n",
5011 lacp_rate);
5012 return -EINVAL;
5013 }
5014 lacp_fast = valptr->value;
5015 }
5016 }
5017
5018 if (ad_select) {
5019 bond_opt_initstr(&newval, ad_select);
5020 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
5021 &newval);
5022 if (!valptr) {
5023 pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
5024 return -EINVAL;
5025 }
5026 params->ad_select = valptr->value;
5027 if (bond_mode != BOND_MODE_8023AD)
5028 pr_warn("ad_select param only affects 802.3ad mode\n");
5029 } else {
5030 params->ad_select = BOND_AD_STABLE;
5031 }
5032
5033 if (max_bonds < 0) {
5034 pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
5035 max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
5036 max_bonds = BOND_DEFAULT_MAX_BONDS;
5037 }
5038
5039 if (miimon < 0) {
5040 pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5041 miimon, INT_MAX);
5042 miimon = 0;
5043 }
5044
5045 if (updelay < 0) {
5046 pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5047 updelay, INT_MAX);
5048 updelay = 0;
5049 }
5050
5051 if (downdelay < 0) {
5052 pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5053 downdelay, INT_MAX);
5054 downdelay = 0;
5055 }
5056
5057 if ((use_carrier != 0) && (use_carrier != 1)) {
5058 pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
5059 use_carrier);
5060 use_carrier = 1;
5061 }
5062
5063 if (num_peer_notif < 0 || num_peer_notif > 255) {
5064 pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
5065 num_peer_notif);
5066 num_peer_notif = 1;
5067 }
5068
5069 /* reset values for 802.3ad/TLB/ALB */
5070 if (!bond_mode_uses_arp(bond_mode)) {
5071 if (!miimon) {
5072			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed, or duplex, which are essential for 802.3ad operation\n");
5073 pr_warn("Forcing miimon to 100msec\n");
5074 miimon = BOND_DEFAULT_MIIMON;
5075 }
5076 }
5077
5078 if (tx_queues < 1 || tx_queues > 255) {
5079 pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
5080 tx_queues, BOND_DEFAULT_TX_QUEUES);
5081 tx_queues = BOND_DEFAULT_TX_QUEUES;
5082 }
5083
5084 if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
5085 pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
5086 all_slaves_active);
5087 all_slaves_active = 0;
5088 }
5089
5090 if (resend_igmp < 0 || resend_igmp > 255) {
5091 pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
5092 resend_igmp, BOND_DEFAULT_RESEND_IGMP);
5093 resend_igmp = BOND_DEFAULT_RESEND_IGMP;
5094 }
5095
5096 bond_opt_initval(&newval, packets_per_slave);
5097 if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
5098 pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u resetting to 1\n",
5099 packets_per_slave, USHRT_MAX);
5100 packets_per_slave = 1;
5101 }
5102
5103 if (bond_mode == BOND_MODE_ALB) {
5104 pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
5105 updelay);
5106 }
5107
5108 if (!miimon) {
5109 if (updelay || downdelay) {
5110			/* just warn the user that the up/down delay will have
5111 * no effect since miimon is zero...
5112 */
5113 pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
5114 updelay, downdelay);
5115 }
5116 } else {
5117 /* don't allow arp monitoring */
5118 if (arp_interval) {
5119 pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
5120 miimon, arp_interval);
5121 arp_interval = 0;
5122 }
5123
5124 if ((updelay % miimon) != 0) {
5125 pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
5126 updelay, miimon, (updelay / miimon) * miimon);
5127 }
5128
5129 updelay /= miimon;
5130
5131 if ((downdelay % miimon) != 0) {
5132 pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
5133 downdelay, miimon,
5134 (downdelay / miimon) * miimon);
5135 }
5136
5137 downdelay /= miimon;
5138 }
5139
5140 if (arp_interval < 0) {
5141 pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
5142 arp_interval, INT_MAX);
5143 arp_interval = 0;
5144 }
5145
5146 for (arp_ip_count = 0, i = 0;
5147 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
5148 __be32 ip;
5149
5150 /* not a complete check, but good enough to catch mistakes */
5151 if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
5152 !bond_is_ip_target_ok(ip)) {
5153 pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
5154 arp_ip_target[i]);
5155 arp_interval = 0;
5156 } else {
5157 if (bond_get_targets_ip(arp_target, ip) == -1)
5158 arp_target[arp_ip_count++] = ip;
5159 else
5160 pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
5161 &ip);
5162 }
5163 }
5164
5165 if (arp_interval && !arp_ip_count) {
5166 /* don't allow arping if no arp_ip_target given... */
5167 pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
5168 arp_interval);
5169 arp_interval = 0;
5170 }
5171
5172 if (arp_validate) {
5173 if (!arp_interval) {
5174 pr_err("arp_validate requires arp_interval\n");
5175 return -EINVAL;
5176 }
5177
5178 bond_opt_initstr(&newval, arp_validate);
5179 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
5180 &newval);
5181 if (!valptr) {
5182 pr_err("Error: invalid arp_validate \"%s\"\n",
5183 arp_validate);
5184 return -EINVAL;
5185 }
5186 arp_validate_value = valptr->value;
5187 } else {
5188 arp_validate_value = 0;
5189 }
5190
5191 if (arp_all_targets) {
5192 bond_opt_initstr(&newval, arp_all_targets);
5193 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
5194 &newval);
5195 if (!valptr) {
5196 pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
5197 arp_all_targets);
5198 arp_all_targets_value = 0;
5199 } else {
5200 arp_all_targets_value = valptr->value;
5201 }
5202 }
5203
5204 if (miimon) {
5205 pr_info("MII link monitoring set to %d ms\n", miimon);
5206 } else if (arp_interval) {
5207 valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
5208 arp_validate_value);
5209 pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
5210 arp_interval, valptr->string, arp_ip_count);
5211
5212 for (i = 0; i < arp_ip_count; i++)
5213 pr_cont(" %s", arp_ip_target[i]);
5214
5215 pr_cont("\n");
5216
5217 } else if (max_bonds) {
5218 /* miimon and arp_interval not set, we need one so things
5219 * work as expected, see bonding.txt for details
5220 */
5221 pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
5222 }
5223
5224 if (primary && !bond_mode_uses_primary(bond_mode)) {
5225 /* currently, using a primary only makes sense
5226 * in active backup, TLB or ALB modes
5227 */
5228 pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
5229 primary, bond_mode_name(bond_mode));
5230 primary = NULL;
5231 }
5232
5233 if (primary && primary_reselect) {
5234 bond_opt_initstr(&newval, primary_reselect);
5235 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
5236 &newval);
5237 if (!valptr) {
5238 pr_err("Error: Invalid primary_reselect \"%s\"\n",
5239 primary_reselect);
5240 return -EINVAL;
5241 }
5242 primary_reselect_value = valptr->value;
5243 } else {
5244 primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
5245 }
5246
5247 if (fail_over_mac) {
5248 bond_opt_initstr(&newval, fail_over_mac);
5249 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
5250 &newval);
5251 if (!valptr) {
5252 pr_err("Error: invalid fail_over_mac \"%s\"\n",
5253 fail_over_mac);
5254 return -EINVAL;
5255 }
5256 fail_over_mac_value = valptr->value;
5257 if (bond_mode != BOND_MODE_ACTIVEBACKUP)
5258 pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
5259 } else {
5260 fail_over_mac_value = BOND_FOM_NONE;
5261 }
5262
5263 bond_opt_initstr(&newval, "default");
5264 valptr = bond_opt_parse(
5265 bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
5266 &newval);
5267 if (!valptr) {
5268 pr_err("Error: No ad_actor_sys_prio default value");
5269 return -EINVAL;
5270 }
5271 ad_actor_sys_prio = valptr->value;
5272
5273 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
5274 &newval);
5275 if (!valptr) {
5276 pr_err("Error: No ad_user_port_key default value");
5277 return -EINVAL;
5278 }
5279 ad_user_port_key = valptr->value;
5280
5281 bond_opt_initstr(&newval, "default");
5282 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
5283 if (!valptr) {
5284 pr_err("Error: No tlb_dynamic_lb default value");
5285 return -EINVAL;
5286 }
5287 tlb_dynamic_lb = valptr->value;
5288
5289 if (lp_interval == 0) {
5290		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
5291 INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
5292 lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
5293 }
5294
5295 /* fill params struct with the proper values */
5296 params->mode = bond_mode;
5297 params->xmit_policy = xmit_hashtype;
5298 params->miimon = miimon;
5299 params->num_peer_notif = num_peer_notif;
5300 params->arp_interval = arp_interval;
5301 params->arp_validate = arp_validate_value;
5302 params->arp_all_targets = arp_all_targets_value;
5303 params->updelay = updelay;
5304 params->downdelay = downdelay;
5305 params->peer_notif_delay = 0;
5306 params->use_carrier = use_carrier;
5307 params->lacp_fast = lacp_fast;
5308 params->primary[0] = 0;
5309 params->primary_reselect = primary_reselect_value;
5310 params->fail_over_mac = fail_over_mac_value;
5311 params->tx_queues = tx_queues;
5312 params->all_slaves_active = all_slaves_active;
5313 params->resend_igmp = resend_igmp;
5314 params->min_links = min_links;
5315 params->lp_interval = lp_interval;
5316 params->packets_per_slave = packets_per_slave;
5317 params->tlb_dynamic_lb = tlb_dynamic_lb;
5318 params->ad_actor_sys_prio = ad_actor_sys_prio;
5319 eth_zero_addr(params->ad_actor_system);
5320 params->ad_user_port_key = ad_user_port_key;
5321 if (packets_per_slave > 0) {
5322 params->reciprocal_packets_per_slave =
5323 reciprocal_value(packets_per_slave);
5324 } else {
5325 /* reciprocal_packets_per_slave is unused if
5326 * packets_per_slave is 0 or 1, just initialize it
5327 */
5328 params->reciprocal_packets_per_slave =
5329 (struct reciprocal_value) { 0 };
5330 }
5331
5332 if (primary) {
5333 strncpy(params->primary, primary, IFNAMSIZ);
5334 params->primary[IFNAMSIZ - 1] = 0;
5335 }
5336
5337 memcpy(params->arp_targets, arp_target, sizeof(arp_target));
5338
5339 return 0;
5340}
5341
5342/* Called from registration process */
5343static int bond_init(struct net_device *bond_dev)
5344{
5345 struct bonding *bond = netdev_priv(bond_dev);
5346 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
5347
5348 netdev_dbg(bond_dev, "Begin bond_init\n");
5349
5350 bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
5351 if (!bond->wq)
5352 return -ENOMEM;
5353
5354 spin_lock_init(&bond->stats_lock);
5355 netdev_lockdep_set_classes(bond_dev);
5356
5357 list_add_tail(&bond->bond_list, &bn->dev_list);
5358
5359 bond_prepare_sysfs_group(bond);
5360
5361 bond_debug_register(bond);
5362
5363 /* Ensure valid dev_addr */
5364 if (is_zero_ether_addr(bond_dev->dev_addr) &&
5365 bond_dev->addr_assign_type == NET_ADDR_PERM)
5366 eth_hw_addr_random(bond_dev);
5367
5368 return 0;
5369}
5370
5371unsigned int bond_get_num_tx_queues(void)
5372{
5373 return tx_queues;
5374}
5375
5376/* Create a new bond based on the specified name and bonding parameters.
5377 * If name is NULL, obtain a suitable "bond%d" name for us.
5378 * Caller must NOT hold rtnl_lock; we need to release it here before we
5379 * set up our sysfs entries.
5380 */
5381int bond_create(struct net *net, const char *name)
5382{
5383 struct net_device *bond_dev;
5384 struct bonding *bond;
5385 struct alb_bond_info *bond_info;
5386 int res;
5387
5388 rtnl_lock();
5389
5390 bond_dev = alloc_netdev_mq(sizeof(struct bonding),
5391 name ? name : "bond%d", NET_NAME_UNKNOWN,
5392 bond_setup, tx_queues);
5393 if (!bond_dev) {
5394 pr_err("%s: eek! can't alloc netdev!\n", name);
5395 rtnl_unlock();
5396 return -ENOMEM;
5397 }
5398
5399 /*
5400 * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
5401 * It is set to 0 by default which is wrong.
5402 */
5403 bond = netdev_priv(bond_dev);
5404 bond_info = &(BOND_ALB_INFO(bond));
5405 bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
5406
5407 dev_net_set(bond_dev, net);
5408 bond_dev->rtnl_link_ops = &bond_link_ops;
5409
5410 res = register_netdevice(bond_dev);
5411 if (res < 0) {
5412 free_netdev(bond_dev);
5413 rtnl_unlock();
5414
5415 return res;
5416 }
5417
5418 netif_carrier_off(bond_dev);
5419
5420 bond_work_init_all(bond);
5421
5422 rtnl_unlock();
5423 return 0;
5424}
5425
5426static int __net_init bond_net_init(struct net *net)
5427{
5428 struct bond_net *bn = net_generic(net, bond_net_id);
5429
5430 bn->net = net;
5431 INIT_LIST_HEAD(&bn->dev_list);
5432
5433 bond_create_proc_dir(bn);
5434 bond_create_sysfs(bn);
5435
5436 return 0;
5437}
5438
5439static void __net_exit bond_net_exit(struct net *net)
5440{
5441 struct bond_net *bn = net_generic(net, bond_net_id);
5442 struct bonding *bond, *tmp_bond;
5443 LIST_HEAD(list);
5444
5445 bond_destroy_sysfs(bn);
5446
5447 /* Kill off any bonds created after unregistering bond rtnl ops */
5448 rtnl_lock();
5449 list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
5450 unregister_netdevice_queue(bond->dev, &list);
5451 unregister_netdevice_many(&list);
5452 rtnl_unlock();
5453
5454 bond_destroy_proc_dir(bn);
5455}
5456
5457static struct pernet_operations bond_net_ops = {
5458 .init = bond_net_init,
5459 .exit = bond_net_exit,
5460 .id = &bond_net_id,
5461 .size = sizeof(struct bond_net),
5462};
5463
5464static int __init bonding_init(void)
5465{
5466 int i;
5467 int res;
5468
5469 res = bond_check_params(&bonding_defaults);
5470 if (res)
5471 goto out;
5472
5473 res = register_pernet_subsys(&bond_net_ops);
5474 if (res)
5475 goto out;
5476
5477 res = bond_netlink_init();
5478 if (res)
5479 goto err_link;
5480
5481 bond_create_debugfs();
5482
5483 for (i = 0; i < max_bonds; i++) {
5484 res = bond_create(&init_net, NULL);
5485 if (res)
5486 goto err;
5487 }
5488
5489 skb_flow_dissector_init(&flow_keys_bonding,
5490 flow_keys_bonding_keys,
5491 ARRAY_SIZE(flow_keys_bonding_keys));
5492
5493 register_netdevice_notifier(&bond_netdev_notifier);
5494out:
5495 return res;
5496err:
5497 bond_destroy_debugfs();
5498 bond_netlink_fini();
5499err_link:
5500 unregister_pernet_subsys(&bond_net_ops);
5501 goto out;
5502
5503}
5504
5505static void __exit bonding_exit(void)
5506{
5507 unregister_netdevice_notifier(&bond_netdev_notifier);
5508
5509 bond_destroy_debugfs();
5510
5511 bond_netlink_fini();
5512 unregister_pernet_subsys(&bond_net_ops);
5513
5514#ifdef CONFIG_NET_POLL_CONTROLLER
5515 /* Make sure we don't have an imbalance on our netpoll blocking */
5516 WARN_ON(atomic_read(&netpoll_block_tx));
5517#endif
5518}
5519
5520module_init(bonding_init);
5521module_exit(bonding_exit);
5522MODULE_LICENSE("GPL");
5523MODULE_DESCRIPTION(DRV_DESCRIPTION);
5524MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");