// SPDX-License-Identifier: GPL-1.0+
/*
 * originally based on the dummy device.
 *
 * Copyright 1999, Thomas Davis, tadavis@lbl.gov.
 * Based on dummy.c, and eql.c devices.
 *
 * bonding.c: an Ethernet Bonding driver
 *
 * This is useful to talk to Cisco EtherChannel compatible equipment:
 *	Cisco 5500
 *	Sun Trunking (Solaris)
 *	Alteon AceDirector Trunks
 *	Linux Bonding
 *	and probably many L2 switches ...
 *
 * How it works:
 *    ifconfig bond0 ipaddress netmask up
 *	will set up a network device with an IP address. No MAC address
 *	will be assigned at this time. The hw MAC address will come from
 *	the first slave bonded to the channel. All slaves will then use
 *	this hw MAC address.
 *
 *    ifconfig bond0 down
 *	will release all slaves, marking them as down.
 *
 *    ifenslave bond0 eth0
 *	will attach eth0 to bond0 as a slave. eth0's hw MAC address will either
 *	a: be used as the initial MAC address, or
 *	b: if a hw MAC address is already there, eth0's hw MAC address
 *	   will then be set from bond0.
 */

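/* The ifconfig/ifenslave commands above are legacy. A roughly equivalent
 * modern setup uses iproute2; this is an illustrative sketch only -- the
 * interface names and the chosen mode/miimon values are assumptions, not
 * something this driver mandates:
 *
 *    ip link add bond0 type bond mode active-backup miimon 100
 *    ip link set eth0 down
 *    ip link set eth0 master bond0
 *    ip link set bond0 up
 */
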
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <net/ip.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/socket.h>
#include <linux/ctype.h>
#include <linux/inet.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <asm/dma.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
#include <linux/smp.h>
#include <linux/if_ether.h>
#include <net/arp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_bonding.h>
#include <linux/phy.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
#include <net/route.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
#include <linux/rculist.h>
#include <net/flow_dissector.h>
#include <net/xfrm.h>
#include <net/bonding.h>
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
#if IS_ENABLED(CONFIG_TLS_DEVICE)
#include <net/tls.h>
#endif
#include <net/ip6_route.h>

#include "bonding_priv.h"

/*---------------------------- Module parameters ----------------------------*/

/* monitor all links that often (in milliseconds). <=0 disables monitoring */

static int max_bonds = BOND_DEFAULT_MAX_BONDS;
static int tx_queues = BOND_DEFAULT_TX_QUEUES;
static int num_peer_notif = 1;
static int miimon;
static int updelay;
static int downdelay;
static int use_carrier = 1;
static char *mode;
static char *primary;
static char *primary_reselect;
static char *lacp_rate;
static int min_links;
static char *ad_select;
static char *xmit_hash_policy;
static int arp_interval;
static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
static int packets_per_slave = 1;
static int lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;

module_param(max_bonds, int, 0);
MODULE_PARM_DESC(max_bonds, "Max number of bonded devices");
module_param(tx_queues, int, 0);
MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)");
module_param_named(num_grat_arp, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on "
			       "failover event (alias of num_unsol_na)");
module_param_named(num_unsol_na, num_peer_notif, int, 0644);
MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on "
			       "failover event (alias of num_grat_arp)");
module_param(miimon, int, 0);
MODULE_PARM_DESC(miimon, "Link check interval in milliseconds");
module_param(updelay, int, 0);
MODULE_PARM_DESC(updelay, "Delay before considering link up, in milliseconds");
module_param(downdelay, int, 0);
MODULE_PARM_DESC(downdelay, "Delay before considering link down, "
			    "in milliseconds");
module_param(use_carrier, int, 0);
MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; "
			      "0 for off, 1 for on (default)");
module_param(mode, charp, 0);
MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, "
		       "1 for active-backup, 2 for balance-xor, "
		       "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, "
		       "6 for balance-alb");
module_param(primary, charp, 0);
MODULE_PARM_DESC(primary, "Primary network device to use");
module_param(primary_reselect, charp, 0);
MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
				   "once it comes up; "
				   "0 for always (default), "
				   "1 for only if speed of primary is "
				   "better, "
				   "2 for only on active slave "
				   "failure");
module_param(lacp_rate, charp, 0);
MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; "
			    "0 for slow, 1 for fast");
module_param(ad_select, charp, 0);
MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; "
			    "0 for stable (default), 1 for bandwidth, "
			    "2 for count");
module_param(min_links, int, 0);
MODULE_PARM_DESC(min_links, "Minimum number of available links before turning on carrier");

module_param(xmit_hash_policy, charp, 0);
MODULE_PARM_DESC(xmit_hash_policy, "balance-alb, balance-tlb, balance-xor, 802.3ad hashing method; "
				   "0 for layer 2 (default), 1 for layer 3+4, "
				   "2 for layer 2+3, 3 for encap layer 2+3, "
				   "4 for encap layer 3+4, 5 for vlan+srcmac");
module_param(arp_interval, int, 0);
MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds");
module_param_array(arp_ip_target, charp, NULL, 0);
MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form");
module_param(arp_validate, charp, 0);
MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; "
			       "0 for none (default), 1 for active, "
			       "2 for backup, 3 for all");
module_param(arp_all_targets, charp, 0);
MODULE_PARM_DESC(arp_all_targets, "fail on any/all arp targets timeout; 0 for any (default), 1 for all");
module_param(fail_over_mac, charp, 0);
MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to "
				"the same MAC; 0 for none (default), "
				"1 for active, 2 for follow");
module_param(all_slaves_active, int, 0);
MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface "
				    "by setting active flag for all slaves; "
				    "0 for never (default), 1 for always.");
module_param(resend_igmp, int, 0);
MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on "
			      "link failure");
module_param(packets_per_slave, int, 0);
MODULE_PARM_DESC(packets_per_slave, "Packets to send per slave in balance-rr "
				    "mode; 0 for a random slave, 1 packet per "
				    "slave (default), >1 packets per slave.");
module_param(lp_interval, uint, 0);
MODULE_PARM_DESC(lp_interval, "The number of seconds between instances where "
			      "the bonding driver sends learning packets to "
			      "each slave's peer switch. The default is 1.");

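/* Example module load using the parameters above (illustrative only; the
 * option names are the module parameters documented here, but the chosen
 * values are arbitrary). Per-bond runtime changes normally go through
 * sysfs or netlink rather than module parameters:
 *
 *    modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast max_bonds=1
 */
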
/*----------------------------- Global variables ----------------------------*/

#ifdef CONFIG_NET_POLL_CONTROLLER
atomic_t netpoll_block_tx = ATOMIC_INIT(0);
#endif

unsigned int bond_net_id __read_mostly;

static const struct flow_dissector_key flow_keys_bonding_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v4addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct flow_keys, addrs.v6addrs),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_TIPC,
		.offset = offsetof(struct flow_keys, addrs.tipckey),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct flow_keys, icmp),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_VLAN,
		.offset = offsetof(struct flow_keys, vlan),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
		.offset = offsetof(struct flow_keys, tags),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct flow_keys, keyid),
	},
};

static struct flow_dissector flow_keys_bonding __read_mostly;

/*-------------------------- Forward declarations ---------------------------*/

static int bond_init(struct net_device *bond_dev);
static void bond_uninit(struct net_device *bond_dev);
static void bond_get_stats(struct net_device *bond_dev,
			   struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
				  int mod);
static void bond_netdev_notify_work(struct work_struct *work);

/*---------------------------- General routines -----------------------------*/

const char *bond_mode_name(int mode)
{
	static const char *names[] = {
		[BOND_MODE_ROUNDROBIN] = "load balancing (round-robin)",
		[BOND_MODE_ACTIVEBACKUP] = "fault-tolerance (active-backup)",
		[BOND_MODE_XOR] = "load balancing (xor)",
		[BOND_MODE_BROADCAST] = "fault-tolerance (broadcast)",
		[BOND_MODE_8023AD] = "IEEE 802.3ad Dynamic link aggregation",
		[BOND_MODE_TLB] = "transmit load balancing",
		[BOND_MODE_ALB] = "adaptive load balancing",
	};

	if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
		return "unknown";

	return names[mode];
}

/**
 * bond_dev_queue_xmit - Prepare skb for xmit.
 *
 * @bond: bond device that got this skb for tx.
 * @skb: hw accel VLAN tagged skb to transmit
 * @slave_dev: slave that is supposed to xmit this skbuff
 */
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
				struct net_device *slave_dev)
{
	skb->dev = slave_dev;

	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);

	if (unlikely(netpoll_tx_running(bond->dev)))
		return bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);

	return dev_queue_xmit(skb);
}

static bool bond_sk_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34)
			return true;
		fallthrough;
	default:
		return false;
	}
}

static bool bond_xdp_check(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
	case BOND_MODE_ACTIVEBACKUP:
		return true;
	case BOND_MODE_8023AD:
	case BOND_MODE_XOR:
		/* vlan+srcmac is not supported with XDP as in most cases the
		 * 802.1q payload is not in the packet due to hardware offload.
		 */
		if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
			return true;
		fallthrough;
	default:
		return false;
	}
}

/*---------------------------------- VLAN -----------------------------------*/

/* In the following two functions, bond_vlan_rx_add_vid and
 * bond_vlan_rx_kill_vid, we don't protect the slave list iteration with a
 * lock because:
 * a. This operation is performed in IOCTL context,
 * b. The operation is protected by the RTNL semaphore in the 8021q code,
 * c. Holding a lock with BH disabled while directly calling a base driver
 *    entry point is generally a BAD idea.
 *
 * The design of synchronization/protection for this operation in the 8021q
 * module is good for one or more VLAN devices over a single physical device
 * and cannot be extended for a teaming solution like bonding, so there is a
 * potential race condition here where a net device from the vlan group might
 * be referenced (either by a base driver or the 8021q code) while it is being
 * removed from the system. However, it turns out we're not making matters
 * worse, and if it works for regular VLAN usage it will work here too.
 */

/**
 * bond_vlan_rx_add_vid - Propagates adding an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being added
 */
static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
				__be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	int res;

	bond_for_each_slave(bond, slave, iter) {
		res = vlan_vid_add(slave->dev, proto, vid);
		if (res)
			goto unwind;
	}

	return 0;

unwind:
	/* unwind to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		vlan_vid_del(rollback_slave->dev, proto, vid);
	}

	return res;
}

/**
 * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves
 * @bond_dev: bonding net device that got called
 * @proto: network protocol ID
 * @vid: vlan id being removed
 */
static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
				 __be16 proto, u16 vid)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		vlan_vid_del(slave->dev, proto, vid);

	if (bond_is_lb(bond))
		bond_alb_clear_vlan(bond, vid);

	return 0;
}

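/* Illustrative example of what exercises the two handlers above (interface
 * names are assumptions): creating a VLAN on top of the bond propagates the
 * VID down to every slave via bond_vlan_rx_add_vid(), and deleting it
 * reaches every slave via bond_vlan_rx_kill_vid():
 *
 *    ip link add link bond0 name bond0.100 type vlan id 100
 *    ip link del bond0.100
 */
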
/*---------------------------------- XFRM -----------------------------------*/

#ifdef CONFIG_XFRM_OFFLOAD
/**
 * bond_ipsec_add_sa - program device with a security association
 * @xs: pointer to transformer state struct
 * @extack: extack pointer to fill with the failure reason
 **/
static int bond_ipsec_add_sa(struct xfrm_state *xs,
			     struct netlink_ext_ack *extack)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;
	int err;

	if (!bond_dev)
		return -EINVAL;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return -ENODEV;
	}

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
		rcu_read_unlock();
		return -EINVAL;
	}

	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
	if (!ipsec) {
		rcu_read_unlock();
		return -ENOMEM;
	}
	xs->xso.real_dev = slave->dev;

	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
	if (!err) {
		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
	} else {
		kfree(ipsec);
	}
	rcu_read_unlock();
	return err;
}

static void bond_ipsec_add_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave)
		goto out;

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
	    netif_is_bond_master(slave->dev)) {
		spin_lock_bh(&bond->ipsec_lock);
		if (!list_empty(&bond->ipsec_list))
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_add\n",
				   __func__);
		spin_unlock_bh(&bond->ipsec_lock);
		goto out;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		ipsec->xs->xso.real_dev = slave->dev;
		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
			ipsec->xs->xso.real_dev = NULL;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
out:
	rcu_read_unlock();
}

/**
 * bond_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct bond_ipsec *ipsec;
	struct bonding *bond;
	struct slave *slave;

	if (!bond_dev)
		return;

	rcu_read_lock();
	bond = netdev_priv(bond_dev);
	slave = rcu_dereference(bond->curr_active_slave);

	if (!slave)
		goto out;

	if (!xs->xso.real_dev)
		goto out;

	WARN_ON(xs->xso.real_dev != slave->dev);

	if (!slave->dev->xfrmdev_ops ||
	    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
	    netif_is_bond_master(slave->dev)) {
		slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
		goto out;
	}

	slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
out:
	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (ipsec->xs == xs) {
			list_del(&ipsec->list);
			kfree(ipsec);
			break;
		}
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

static void bond_ipsec_del_sa_all(struct bonding *bond)
{
	struct net_device *bond_dev = bond->dev;
	struct bond_ipsec *ipsec;
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	if (!slave) {
		rcu_read_unlock();
		return;
	}

	spin_lock_bh(&bond->ipsec_lock);
	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
		if (!ipsec->xs->xso.real_dev)
			continue;

		if (!slave->dev->xfrmdev_ops ||
		    !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
		    netif_is_bond_master(slave->dev)) {
			slave_warn(bond_dev, slave->dev,
				   "%s: no slave xdo_dev_state_delete\n",
				   __func__);
		} else {
			slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
		}
		ipsec->xs->xso.real_dev = NULL;
	}
	spin_unlock_bh(&bond->ipsec_lock);
	rcu_read_unlock();
}

/**
 * bond_ipsec_offload_ok - can this packet use the xfrm hw offload
 * @skb: current data packet
 * @xs: pointer to transformer state struct
 **/
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
	struct net_device *bond_dev = xs->xso.dev;
	struct net_device *real_dev;
	struct slave *curr_active;
	struct bonding *bond;
	int err;

	bond = netdev_priv(bond_dev);
	rcu_read_lock();
	curr_active = rcu_dereference(bond->curr_active_slave);
	real_dev = curr_active->dev;

	if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		err = false;
		goto out;
	}

	if (!xs->xso.real_dev) {
		err = false;
		goto out;
	}

	if (!real_dev->xfrmdev_ops ||
	    !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
	    netif_is_bond_master(real_dev)) {
		err = false;
		goto out;
	}

	err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
	rcu_read_unlock();
	return err;
}

static const struct xfrmdev_ops bond_xfrmdev_ops = {
	.xdo_dev_state_add = bond_ipsec_add_sa,
	.xdo_dev_state_delete = bond_ipsec_del_sa,
	.xdo_dev_offload_ok = bond_ipsec_offload_ok,
};
#endif /* CONFIG_XFRM_OFFLOAD */

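/* For reference, an SA whose offload is requested on the bond device is
 * programmed into the current active slave by bond_ipsec_add_sa() above.
 * An illustrative userspace trigger via iproute2 (addresses, SPI, key and
 * algorithm are placeholders, not values this driver requires):
 *
 *    ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x1 \
 *        reqid 1 mode transport \
 *        aead 'rfc4106(gcm(aes))' 0x1111111111111111111111111111111122222222 128 \
 *        offload dev bond0 dir out
 */
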
/*------------------------------- Link status -------------------------------*/

/* Set the carrier state for the master according to the state of its
 * slaves. If any slaves are up, the master is up. In 802.3ad mode,
 * do special 802.3ad magic.
 *
 * Returns zero if carrier state does not change, nonzero if it does.
 */
int bond_set_carrier(struct bonding *bond)
{
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond))
		goto down;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		return bond_3ad_set_carrier(bond);

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP) {
			if (!netif_carrier_ok(bond->dev)) {
				netif_carrier_on(bond->dev);
				return 1;
			}
			return 0;
		}
	}

down:
	if (netif_carrier_ok(bond->dev)) {
		netif_carrier_off(bond->dev);
		return 1;
	}
	return 0;
}

/* Get link speed and duplex from the slave's base driver
 * using ethtool. If for some reason the call fails or the
 * values are invalid, set speed and duplex to -1,
 * and return. Return 1 if speed or duplex settings are
 * UNKNOWN; 0 otherwise.
 */
static int bond_update_speed_duplex(struct slave *slave)
{
	struct net_device *slave_dev = slave->dev;
	struct ethtool_link_ksettings ecmd;
	int res;

	slave->speed = SPEED_UNKNOWN;
	slave->duplex = DUPLEX_UNKNOWN;

	res = __ethtool_get_link_ksettings(slave_dev, &ecmd);
	if (res < 0)
		return 1;
	if (ecmd.base.speed == 0 || ecmd.base.speed == ((__u32)-1))
		return 1;
	switch (ecmd.base.duplex) {
	case DUPLEX_FULL:
	case DUPLEX_HALF:
		break;
	default:
		return 1;
	}

	slave->speed = ecmd.base.speed;
	slave->duplex = ecmd.base.duplex;

	return 0;
}

const char *bond_slave_link_status(s8 link)
{
	switch (link) {
	case BOND_LINK_UP:
		return "up";
	case BOND_LINK_FAIL:
		return "going down";
	case BOND_LINK_DOWN:
		return "down";
	case BOND_LINK_BACK:
		return "going back";
	default:
		return "unknown";
	}
}

/* If <dev> supports MII link status reporting, check its link status.
 *
 * We either do MII/ETHTOOL ioctls, or check netif_carrier_ok(),
 * depending upon the setting of the use_carrier parameter.
 *
 * Return either BMSR_LSTATUS, meaning that the link is up (or we
 * can't tell and just pretend it is), or 0, meaning that the link is
 * down.
 *
 * If reporting is non-zero, instead of faking link up, return -1 if
 * both ETHTOOL and MII ioctls fail (meaning the device does not
 * support them). If use_carrier is set, return whatever it says.
 * It'd be nice if there was a good way to tell if a driver supports
 * netif_carrier, but there really isn't.
 */
static int bond_check_dev_link(struct bonding *bond,
			       struct net_device *slave_dev, int reporting)
{
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	int (*ioctl)(struct net_device *, struct ifreq *, int);
	struct ifreq ifr;
	struct mii_ioctl_data *mii;

	if (!reporting && !netif_running(slave_dev))
		return 0;

	if (bond->params.use_carrier)
		return netif_carrier_ok(slave_dev) ? BMSR_LSTATUS : 0;

	/* Try to get link status using Ethtool first. */
	if (slave_dev->ethtool_ops->get_link)
		return slave_dev->ethtool_ops->get_link(slave_dev) ?
			BMSR_LSTATUS : 0;

	/* Ethtool can't be used, fallback to MII ioctls. */
	ioctl = slave_ops->ndo_eth_ioctl;
	if (ioctl) {
		/* TODO: set pointer to correct ioctl on a per-team-member
		 * basis to make this more efficient. That is, once
		 * we determine the correct ioctl, we will always
		 * call it and not the others for that team
		 * member.
		 */

		/* We cannot assume that SIOCGMIIPHY will also read a
		 * register; not all network drivers (e.g., e100)
		 * support that.
		 */

		/* Yes, the mii is overlaid on the ifreq.ifr_ifru */
		strscpy_pad(ifr.ifr_name, slave_dev->name, IFNAMSIZ);
		mii = if_mii(&ifr);
		if (ioctl(slave_dev, &ifr, SIOCGMIIPHY) == 0) {
			mii->reg_num = MII_BMSR;
			if (ioctl(slave_dev, &ifr, SIOCGMIIREG) == 0)
				return mii->val_out & BMSR_LSTATUS;
		}
	}

	/* If reporting, report that either there's no ndo_eth_ioctl,
	 * or both SIOCGMIIREG and get_link failed (meaning that we
	 * cannot report link status). If not reporting, pretend
	 * we're ok.
	 */
	return reporting ? -1 : BMSR_LSTATUS;
}

/*----------------------------- Multicast list ------------------------------*/

/* Push the promiscuity flag down to appropriate slaves */
static int bond_set_promiscuity(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_promiscuity(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_promiscuity(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Push the allmulti flag down to all slaves */
static int bond_set_allmulti(struct bonding *bond, int inc)
{
	struct list_head *iter;
	int err = 0;

	if (bond_uses_primary(bond)) {
		struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);

		if (curr_active)
			err = dev_set_allmulti(curr_active->dev, inc);
	} else {
		struct slave *slave;

		bond_for_each_slave(bond, slave, iter) {
			err = dev_set_allmulti(slave->dev, inc);
			if (err)
				return err;
		}
	}
	return err;
}

/* Retrieve the list of registered multicast addresses for the bonding
 * device and retransmit an IGMP JOIN request to the current active
 * slave.
 */
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    mcast_work.work);

	if (!rtnl_trylock()) {
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);

	if (bond->igmp_retrans > 1) {
		bond->igmp_retrans--;
		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
	}
	rtnl_unlock();
}

/* Flush bond's hardware addresses from slave */
static void bond_hw_addr_flush(struct net_device *bond_dev,
			       struct net_device *slave_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	dev_uc_unsync(slave_dev, bond_dev);
	dev_mc_unsync(slave_dev, bond_dev);

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		dev_mc_del(slave_dev, lacpdu_mcast_addr);
}

/*--------------------------- Active slave change ---------------------------*/

/* Update the hardware address list and promisc/allmulti for the new and
 * old active slaves (if any). Modes that are not using primary keep all
 * slaves up-to-date at all times; only the modes that use primary need to
 * call this function to swap these settings during a failover.
 */
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
			      struct slave *old_active)
{
	if (old_active) {
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(old_active->dev, -1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(old_active->dev, -1);

		if (bond->dev->flags & IFF_UP)
			bond_hw_addr_flush(bond->dev, old_active->dev);
	}

	if (new_active) {
		/* FIXME: Signal errors upstream. */
		if (bond->dev->flags & IFF_PROMISC)
			dev_set_promiscuity(new_active->dev, 1);

		if (bond->dev->flags & IFF_ALLMULTI)
			dev_set_allmulti(new_active->dev, 1);

		if (bond->dev->flags & IFF_UP) {
			netif_addr_lock_bh(bond->dev);
			dev_uc_sync(new_active->dev, bond->dev);
			dev_mc_sync(new_active->dev, bond->dev);
			netif_addr_unlock_bh(bond->dev);
		}
	}
}

/**
 * bond_set_dev_addr - clone slave's address to bond
 * @bond_dev: bond net device
 * @slave_dev: slave net device
 *
 * Should be called with RTNL held.
 */
static int bond_set_dev_addr(struct net_device *bond_dev,
			     struct net_device *slave_dev)
{
	int err;

	slave_dbg(bond_dev, slave_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
		  bond_dev, slave_dev, slave_dev->addr_len);
	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
	if (err)
		return err;

	__dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
	return 0;
}

static struct slave *bond_get_old_active(struct bonding *bond,
					 struct slave *new_active)
{
	struct slave *slave;
	struct list_head *iter;

	bond_for_each_slave(bond, slave, iter) {
		if (slave == new_active)
			continue;

		if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
			return slave;
	}

	return NULL;
}

/* bond_do_fail_over_mac
 *
 * Perform special MAC address swapping for fail_over_mac settings
 *
 * Called with RTNL
 */
static void bond_do_fail_over_mac(struct bonding *bond,
				  struct slave *new_active,
				  struct slave *old_active)
{
	u8 tmp_mac[MAX_ADDR_LEN];
	struct sockaddr_storage ss;
	int rv;

	switch (bond->params.fail_over_mac) {
	case BOND_FOM_ACTIVE:
		if (new_active) {
			rv = bond_set_dev_addr(bond->dev, new_active->dev);
			if (rv)
				slave_err(bond->dev, new_active->dev, "Error %d setting bond MAC from slave\n",
					  -rv);
		}
		break;
	case BOND_FOM_FOLLOW:
		/* if new_active && old_active, swap them
		 * if just old_active, do nothing (going to no active slave)
		 * if just new_active, set new_active to bond's MAC
		 */
		if (!new_active)
			return;

		if (!old_active)
			old_active = bond_get_old_active(bond, new_active);

		if (old_active) {
			bond_hw_addr_copy(tmp_mac, new_active->dev->dev_addr,
					  new_active->dev->addr_len);
			bond_hw_addr_copy(ss.__data,
					  old_active->dev->dev_addr,
					  old_active->dev->addr_len);
			ss.ss_family = new_active->dev->type;
		} else {
			bond_hw_addr_copy(ss.__data, bond->dev->dev_addr,
					  bond->dev->addr_len);
			ss.ss_family = bond->dev->type;
		}

		rv = dev_set_mac_address(new_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv) {
			slave_err(bond->dev, new_active->dev, "Error %d setting MAC of new active slave\n",
				  -rv);
			goto out;
		}

		if (!old_active)
			goto out;

		bond_hw_addr_copy(ss.__data, tmp_mac,
				  new_active->dev->addr_len);
		ss.ss_family = old_active->dev->type;

		rv = dev_set_mac_address(old_active->dev,
					 (struct sockaddr *)&ss, NULL);
		if (rv)
			slave_err(bond->dev, old_active->dev, "Error %d setting MAC of old active slave\n",
				  -rv);
out:
		break;
	default:
		netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
			   bond->params.fail_over_mac);
		break;
	}
}

/**
 * bond_choose_primary_or_current - select the primary or high priority slave
 * @bond: our bonding struct
 *
 * - Check if there is a primary link. If the primary link was set and is up,
 *   go on and do link reselection.
 *
 * - If primary link is not set or down, find the highest priority link.
 *   If the highest priority link is not current slave, set it as primary
 *   link and do link reselection.
 */
static struct slave *bond_choose_primary_or_current(struct bonding *bond)
{
	struct slave *prim = rtnl_dereference(bond->primary_slave);
	struct slave *curr = rtnl_dereference(bond->curr_active_slave);
	struct slave *slave, *hprio = NULL;
	struct list_head *iter;

	if (!prim || prim->link != BOND_LINK_UP) {
		bond_for_each_slave(bond, slave, iter) {
			if (slave->link == BOND_LINK_UP) {
				hprio = hprio ?: slave;
				if (slave->prio > hprio->prio)
					hprio = slave;
			}
		}

		if (hprio && hprio != curr) {
			prim = hprio;
			goto link_reselect;
		}

		if (!curr || curr->link != BOND_LINK_UP)
			return NULL;
		return curr;
	}

	if (bond->force_primary) {
		bond->force_primary = false;
		return prim;
	}

link_reselect:
	if (!curr || curr->link != BOND_LINK_UP)
		return prim;

	/* At this point, prim and curr are both up */
	switch (bond->params.primary_reselect) {
	case BOND_PRI_RESELECT_ALWAYS:
		return prim;
	case BOND_PRI_RESELECT_BETTER:
		if (prim->speed < curr->speed)
			return curr;
		if (prim->speed == curr->speed && prim->duplex <= curr->duplex)
			return curr;
		return prim;
	case BOND_PRI_RESELECT_FAILURE:
		return curr;
	default:
		netdev_err(bond->dev, "impossible primary_reselect %d\n",
			   bond->params.primary_reselect);
		return curr;
	}
}

/**
 * bond_find_best_slave - select the best available slave to be the active one
 * @bond: our bonding struct
 */
static struct slave *bond_find_best_slave(struct bonding *bond)
{
	struct slave *slave, *bestslave = NULL;
	struct list_head *iter;
	int mintime = bond->params.updelay;

	slave = bond_choose_primary_or_current(bond);
	if (slave)
		return slave;

	bond_for_each_slave(bond, slave, iter) {
		if (slave->link == BOND_LINK_UP)
			return slave;
		if (slave->link == BOND_LINK_BACK && bond_slave_is_up(slave) &&
		    slave->delay < mintime) {
			mintime = slave->delay;
			bestslave = slave;
		}
	}

	return bestslave;
}

static bool bond_should_notify_peers(struct bonding *bond)
{
	struct slave *slave;

	rcu_read_lock();
	slave = rcu_dereference(bond->curr_active_slave);
	rcu_read_unlock();

	if (!slave || !bond->send_peer_notif ||
	    bond->send_peer_notif %
	    max(1, bond->params.peer_notif_delay) != 0 ||
	    !netif_carrier_ok(bond->dev) ||
	    test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
		return false;

	netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
		   slave ? slave->dev->name : "NULL");

	return true;
}

/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
 * @new_active: the new slave to make the active one
 *
 * Set the new slave to the bond's settings and unset them on the old
 * curr_active_slave.
 * Settings include flags, mc-list, promiscuity, allmulti, etc.
 *
 * If @new_active's link state is %BOND_LINK_BACK we'll set it to
 * %BOND_LINK_UP, because it is apparently the best available slave we have,
 * even though its updelay hasn't timed out yet.
 *
 * Caller must hold RTNL.
 */
void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
{
	struct slave *old_active;

	ASSERT_RTNL();

	old_active = rtnl_dereference(bond->curr_active_slave);

	if (old_active == new_active)
		return;

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	if (new_active) {
		new_active->last_link_up = jiffies;

		if (new_active->link == BOND_LINK_BACK) {
			if (bond_uses_primary(bond)) {
				slave_info(bond->dev, new_active->dev, "making interface the new active one %d ms earlier\n",
					   (bond->params.updelay - new_active->delay) * bond->params.miimon);
			}

			new_active->delay = 0;
			bond_set_slave_link_state(new_active, BOND_LINK_UP,
						  BOND_SLAVE_NOTIFY_NOW);

			if (BOND_MODE(bond) == BOND_MODE_8023AD)
				bond_3ad_handle_link_change(new_active, BOND_LINK_UP);

			if (bond_is_lb(bond))
				bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
		} else {
			if (bond_uses_primary(bond))
				slave_info(bond->dev, new_active->dev, "making interface the new active one\n");
		}
	}

	if (bond_uses_primary(bond))
		bond_hw_addr_swap(bond, new_active, old_active);

	if (bond_is_lb(bond)) {
		bond_alb_handle_active_change(bond, new_active);
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);
		if (new_active)
			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);
	} else {
		rcu_assign_pointer(bond->curr_active_slave, new_active);
	}

	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
		if (old_active)
			bond_set_slave_inactive_flags(old_active,
						      BOND_SLAVE_NOTIFY_NOW);

		if (new_active) {
			bool should_notify_peers = false;

			bond_set_slave_active_flags(new_active,
						    BOND_SLAVE_NOTIFY_NOW);

			if (bond->params.fail_over_mac)
				bond_do_fail_over_mac(bond, new_active,
						      old_active);

			if (netif_running(bond->dev)) {
				bond->send_peer_notif =
					bond->params.num_peer_notif *
					max(1, bond->params.peer_notif_delay);
				should_notify_peers =
					bond_should_notify_peers(bond);
			}

			call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
			if (should_notify_peers) {
				bond->send_peer_notif--;
				call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
							 bond->dev);
			}
		}
	}

#ifdef CONFIG_XFRM_OFFLOAD
	bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* resend IGMP joins since active slave has changed or
	 * all were sent on curr_active_slave.
	 * resend only if bond is brought up with the affected
	 * bonding modes and the retransmission is enabled
	 */
	if (netif_running(bond->dev) && (bond->params.resend_igmp > 0) &&
	    ((bond_uses_primary(bond) && new_active) ||
	     BOND_MODE(bond) == BOND_MODE_ROUNDROBIN)) {
		bond->igmp_retrans = bond->params.resend_igmp;
		queue_delayed_work(bond->wq, &bond->mcast_work, 1);
	}
}

/**
 * bond_select_active_slave - select a new active slave, if needed
 * @bond: our bonding struct
 *
 * This function should be called when one of the following occurs:
 * - The old curr_active_slave has been released or lost its link.
 * - The primary_slave has got its link back.
 * - A slave has got its link back and there's no old curr_active_slave.
 *
 * Caller must hold RTNL.
 */
void bond_select_active_slave(struct bonding *bond)
{
	struct slave *best_slave;
	int rv;

	ASSERT_RTNL();

	best_slave = bond_find_best_slave(bond);
	if (best_slave != rtnl_dereference(bond->curr_active_slave)) {
		bond_change_active_slave(bond, best_slave);
		rv = bond_set_carrier(bond);
		if (!rv)
			return;

		if (netif_carrier_ok(bond->dev))
			netdev_info(bond->dev, "active interface up!\n");
		else
			netdev_info(bond->dev, "now running without any active interface!\n");
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static inline int slave_enable_netpoll(struct slave *slave)
{
	struct netpoll *np;
	int err = 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	err = -ENOMEM;
	if (!np)
		goto out;

	err = __netpoll_setup(np, slave->dev);
	if (err) {
		kfree(np);
		goto out;
	}
	slave->np = np;
out:
	return err;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
	struct netpoll *np = slave->np;

	if (!np)
		return;

	slave->np = NULL;

	__netpoll_free(np);
}

static void bond_poll_controller(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct slave *slave = NULL;
	struct list_head *iter;
	struct ad_info ad_info;

	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		if (bond_3ad_get_active_agg_info(bond, &ad_info))
			return;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!bond_slave_is_up(slave))
			continue;

		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
			struct aggregator *agg =
				SLAVE_AD_INFO(slave)->port.aggregator;

			if (agg &&
			    agg->aggregator_identifier != ad_info.aggregator_id)
				continue;
		}

		netpoll_poll_dev(slave->dev);
	}
}

static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		if (bond_slave_is_up(slave))
			slave_disable_netpoll(slave);
}

static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave;
	int err = 0;

	bond_for_each_slave(bond, slave, iter) {
		err = slave_enable_netpoll(slave);
		if (err) {
			bond_netpoll_cleanup(dev);
			break;
		}
	}
	return err;
}
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
	return 0;
}
static inline void slave_disable_netpoll(struct slave *slave)
{
}
static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
}
#endif

/*---------------------------------- IOCTL ----------------------------------*/

static netdev_features_t bond_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	netdev_features_t mask;
	struct slave *slave;

	mask = features;

	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		features = netdev_increment_features(features,
						     slave->dev->features,
						     mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

#define BOND_VLAN_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
				 NETIF_F_HIGHDMA | NETIF_F_LRO)

#define BOND_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)

#define BOND_MPLS_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_GSO_SOFTWARE)

static void bond_compute_features(struct bonding *bond)
{
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;
	netdev_features_t vlan_features = BOND_VLAN_FEATURES;
	netdev_features_t enc_features = BOND_ENC_FEATURES;
#ifdef CONFIG_XFRM_OFFLOAD
	netdev_features_t xfrm_features = BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */
	netdev_features_t mpls_features = BOND_MPLS_FEATURES;
	struct net_device *bond_dev = bond->dev;
	struct list_head *iter;
	struct slave *slave;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int tso_max_size = TSO_MAX_SIZE;
	u16 tso_max_segs = TSO_MAX_SEGS;

	if (!bond_has_slaves(bond))
		goto done;
	vlan_features &= NETIF_F_ALL_FOR_ALL;
	mpls_features &= NETIF_F_ALL_FOR_ALL;

	bond_for_each_slave(bond, slave, iter) {
		vlan_features = netdev_increment_features(vlan_features,
							  slave->dev->vlan_features,
							  BOND_VLAN_FEATURES);

		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

#ifdef CONFIG_XFRM_OFFLOAD
		xfrm_features = netdev_increment_features(xfrm_features,
							  slave->dev->hw_enc_features,
							  BOND_XFRM_FEATURES);
#endif /* CONFIG_XFRM_OFFLOAD */

		mpls_features = netdev_increment_features(mpls_features,
							  slave->dev->mpls_features,
							  BOND_MPLS_FEATURES);

		dst_release_flag &= slave->dev->priv_flags;
		if (slave->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = slave->dev->hard_header_len;

		tso_max_size = min(tso_max_size, slave->dev->tso_max_size);
		tso_max_segs = min(tso_max_segs, slave->dev->tso_max_segs);
	}
	bond_dev->hard_header_len = max_hard_header_len;

done:
	bond_dev->vlan_features = vlan_features;
	bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_enc_features |= xfrm_features;
#endif /* CONFIG_XFRM_OFFLOAD */
	bond_dev->mpls_features = mpls_features;
	netif_set_tso_max_segs(bond_dev, tso_max_segs);
	netif_set_tso_max_size(bond_dev, tso_max_size);

	bond_dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if ((bond_dev->priv_flags & IFF_XMIT_DST_RELEASE_PERM) &&
	    dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		bond_dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(bond_dev);
}

static void bond_setup_by_slave(struct net_device *bond_dev,
				struct net_device *slave_dev)
{
	bond_dev->header_ops = slave_dev->header_ops;

	bond_dev->type = slave_dev->type;
	bond_dev->hard_header_len = slave_dev->hard_header_len;
	bond_dev->needed_headroom = slave_dev->needed_headroom;
	bond_dev->addr_len = slave_dev->addr_len;

	memcpy(bond_dev->broadcast, slave_dev->broadcast,
	       slave_dev->addr_len);

	if (slave_dev->flags & IFF_POINTOPOINT) {
		bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
		bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
	}
}

/* On bonding slaves other than the currently active slave, suppress
 * duplicates except for alb non-mcast/bcast.
 */
static bool bond_should_deliver_exact_match(struct sk_buff *skb,
					    struct slave *slave,
					    struct bonding *bond)
{
	if (bond_is_slave_inactive(slave)) {
		if (BOND_MODE(bond) == BOND_MODE_ALB &&
		    skb->pkt_type != PACKET_BROADCAST &&
		    skb->pkt_type != PACKET_MULTICAST)
			return false;
		return true;
	}
	return false;
}

static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct slave *slave;
	struct bonding *bond;
	int (*recv_probe)(const struct sk_buff *, struct bonding *,
			  struct slave *);
	int ret = RX_HANDLER_ANOTHER;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	slave = bond_slave_get_rcu(skb->dev);
	bond = slave->bond;

	recv_probe = READ_ONCE(bond->recv_probe);
	if (recv_probe) {
		ret = recv_probe(skb, bond, slave);
		if (ret == RX_HANDLER_CONSUMED) {
			consume_skb(skb);
			return ret;
		}
	}

	/*
	 * For packets determined by bond_should_deliver_exact_match() call to
	 * be suppressed we want to make an exception for link-local packets.
	 * This is necessary for e.g. LLDP daemons to be able to monitor
	 * inactive slave links without being forced to bind to them
	 * explicitly.
	 *
	 * At the same time, packets that are passed to the bonding master
	 * (including link-local ones) can have their originating interface
	 * determined via PACKET_ORIGDEV socket option.
	 */
	if (bond_should_deliver_exact_match(skb, slave, bond)) {
		if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
			return RX_HANDLER_PASS;
		return RX_HANDLER_EXACT;
	}

	skb->dev = bond->dev;

	if (BOND_MODE(bond) == BOND_MODE_ALB &&
	    netif_is_bridge_port(bond->dev) &&
	    skb->pkt_type == PACKET_HOST) {

		if (unlikely(skb_cow_head(skb,
					  skb->data - skb_mac_header(skb)))) {
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		}
		bond_hw_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr,
				  bond->dev->addr_len);
	}

	return ret;
}

static enum netdev_lag_tx_type bond_lag_tx_type(struct bonding *bond)
{
	switch (BOND_MODE(bond)) {
	case BOND_MODE_ROUNDROBIN:
		return NETDEV_LAG_TX_TYPE_ROUNDROBIN;
	case BOND_MODE_ACTIVEBACKUP:
		return NETDEV_LAG_TX_TYPE_ACTIVEBACKUP;
	case BOND_MODE_BROADCAST:
		return NETDEV_LAG_TX_TYPE_BROADCAST;
	case BOND_MODE_XOR:
	case BOND_MODE_8023AD:
		return NETDEV_LAG_TX_TYPE_HASH;
	default:
		return NETDEV_LAG_TX_TYPE_UNKNOWN;
	}
}

static enum netdev_lag_hash bond_lag_hash_type(struct bonding *bond,
					       enum netdev_lag_tx_type type)
{
	if (type != NETDEV_LAG_TX_TYPE_HASH)
		return NETDEV_LAG_HASH_NONE;

	switch (bond->params.xmit_policy) {
	case BOND_XMIT_POLICY_LAYER2:
		return NETDEV_LAG_HASH_L2;
	case BOND_XMIT_POLICY_LAYER34:
		return NETDEV_LAG_HASH_L34;
	case BOND_XMIT_POLICY_LAYER23:
		return NETDEV_LAG_HASH_L23;
	case BOND_XMIT_POLICY_ENCAP23:
		return NETDEV_LAG_HASH_E23;
	case BOND_XMIT_POLICY_ENCAP34:
		return NETDEV_LAG_HASH_E34;
	case BOND_XMIT_POLICY_VLAN_SRCMAC:
		return NETDEV_LAG_HASH_VLAN_SRCMAC;
	default:
		return NETDEV_LAG_HASH_UNKNOWN;
	}
}

static int bond_master_upper_dev_link(struct bonding *bond, struct slave *slave,
				      struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	enum netdev_lag_tx_type type;
	int err;

	type = bond_lag_tx_type(bond);
	lag_upper_info.tx_type = type;
	lag_upper_info.hash_type = bond_lag_hash_type(bond, type);

	err = netdev_master_upper_dev_link(slave->dev, bond->dev, slave,
					   &lag_upper_info, extack);
	if (err)
		return err;

	slave->dev->flags |= IFF_SLAVE;
	return 0;
}

static void bond_upper_dev_unlink(struct bonding *bond, struct slave *slave)
{
	netdev_upper_dev_unlink(slave->dev, bond->dev);
	slave->dev->flags &= ~IFF_SLAVE;
}

static void slave_kobj_release(struct kobject *kobj)
{
	struct slave *slave = to_slave(kobj);
	struct bonding *bond = bond_get_bond_by_slave(slave);

	cancel_delayed_work_sync(&slave->notify_work);
	if (BOND_MODE(bond) == BOND_MODE_8023AD)
		kfree(SLAVE_AD_INFO(slave));

	kfree(slave);
}

static struct kobj_type slave_ktype = {
	.release = slave_kobj_release,
#ifdef CONFIG_SYSFS
	.sysfs_ops = &slave_sysfs_ops,
#endif
};

static int bond_kobj_init(struct slave *slave)
{
	int err;

	err = kobject_init_and_add(&slave->kobj, &slave_ktype,
				   &(slave->dev->dev.kobj), "bonding_slave");
	if (err)
		kobject_put(&slave->kobj);

	return err;
}

static struct slave *bond_alloc_slave(struct bonding *bond,
				      struct net_device *slave_dev)
{
	struct slave *slave = NULL;

	slave = kzalloc(sizeof(*slave), GFP_KERNEL);
	if (!slave)
		return NULL;

	slave->bond = bond;
	slave->dev = slave_dev;
	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);

	if (bond_kobj_init(slave))
		return NULL;

	if (BOND_MODE(bond) == BOND_MODE_8023AD) {
		SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
					       GFP_KERNEL);
		if (!SLAVE_AD_INFO(slave)) {
			kobject_put(&slave->kobj);
			return NULL;
		}
	}

	return slave;
}

static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
{
	info->bond_mode = BOND_MODE(bond);
	info->miimon = bond->params.miimon;
	info->num_slaves = bond->slave_cnt;
}

static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
{
	strcpy(info->slave_name, slave->dev->name);
	info->link = slave->link;
	info->state = bond_slave_state(slave);
	info->link_failure_count = slave->link_failure_count;
}

static void bond_netdev_notify_work(struct work_struct *_work)
{
	struct slave *slave = container_of(_work, struct slave,
					   notify_work.work);

	if (rtnl_trylock()) {
		struct netdev_bonding_info binfo;

		bond_fill_ifslave(slave, &binfo.slave);
		bond_fill_ifbond(slave->bond, &binfo.master);
		netdev_bonding_info_change(slave->dev, &binfo);
		rtnl_unlock();
	} else {
		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
	}
}

void bond_queue_slave_event(struct slave *slave)
{
	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}

void bond_lower_state_changed(struct slave *slave)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = slave->link == BOND_LINK_UP ||
		       slave->link == BOND_LINK_FAIL;
	info.tx_enabled = bond_is_active_slave(slave);
	netdev_lower_state_changed(slave->dev, &info);
}

#define BOND_NL_ERR(bond_dev, extack, errmsg) do {		\
	if (extack)						\
		NL_SET_ERR_MSG(extack, errmsg);			\
	else							\
		netdev_err(bond_dev, "Error: %s\n", errmsg);	\
} while (0)

#define SLAVE_NL_ERR(bond_dev, slave_dev, extack, errmsg) do {		\
	if (extack)							\
		NL_SET_ERR_MSG(extack, errmsg);				\
	else								\
		slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg);	\
} while (0)

/* The bonding driver uses ether_setup() to convert a master bond device
 * to ARPHRD_ETHER, which resets the target netdevice's flags, so we always
 * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE and IFF_UP
 * if they were set.
 */
static void bond_ether_setup(struct net_device *bond_dev)
{
	unsigned int flags = bond_dev->flags & (IFF_SLAVE | IFF_UP);

	ether_setup(bond_dev);
	bond_dev->flags |= IFF_MASTER | flags;
	bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}

void bond_xdp_set_features(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	xdp_features_t val = NETDEV_XDP_ACT_MASK;
	struct list_head *iter;
	struct slave *slave;

	ASSERT_RTNL();

	if (!bond_xdp_check(bond)) {
		xdp_clear_features_flag(bond_dev);
		return;
	}

	bond_for_each_slave(bond, slave, iter)
		val &= slave->dev->xdp_features;

	xdp_set_features_flag(bond_dev, val);
}

/* enslave device <slave> to bond device <master> */
int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
		 struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(bond_dev);
	const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
	struct slave *new_slave = NULL, *prev_slave;
	struct sockaddr_storage ss;
	int link_reporting;
	int res = 0, i;

	if (slave_dev->flags & IFF_MASTER &&
	    !netif_is_bond_master(slave_dev)) {
		BOND_NL_ERR(bond_dev, extack,
			    "Device type (master device) cannot be enslaved");
		return -EPERM;
	}

	if (!bond->params.use_carrier &&
	    slave_dev->ethtool_ops->get_link == NULL &&
	    slave_ops->ndo_eth_ioctl == NULL) {
		slave_warn(bond_dev, slave_dev, "no link monitoring support\n");
	}

	/* already in-use? */
	if (netdev_is_rx_handler_busy(slave_dev)) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device is in use and cannot be enslaved");
		return -EBUSY;
	}

	if (bond_dev == slave_dev) {
		BOND_NL_ERR(bond_dev, extack, "Cannot enslave bond to itself.");
		return -EPERM;
	}

	/* vlan challenged mutual exclusion */
	/* no need to lock since we're protected by rtnl_lock */
	if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
		slave_dbg(bond_dev, slave_dev, "is NETIF_F_VLAN_CHALLENGED\n");
		if (vlan_uses_dev(bond_dev)) {
			SLAVE_NL_ERR(bond_dev, slave_dev, extack,
				     "Can not enslave VLAN challenged device to VLAN enabled bond");
			return -EPERM;
		} else {
			slave_warn(bond_dev, slave_dev, "enslaved VLAN challenged slave. Adding VLANs will be blocked as long as it is part of bond.\n");
		}
	} else {
		slave_dbg(bond_dev, slave_dev, "is !NETIF_F_VLAN_CHALLENGED\n");
	}

	if (slave_dev->features & NETIF_F_HW_ESP)
		slave_dbg(bond_dev, slave_dev, "is esp-hw-offload capable\n");

	/* Old ifenslave binaries are no longer supported. These can
	 * be identified with moderate accuracy by the state of the slave:
	 * the current ifenslave will set the interface down prior to
	 * enslaving it; the old ifenslave will not.
	 */
	if (slave_dev->flags & IFF_UP) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device can not be enslaved while up");
		return -EPERM;
	}

	/* set bonding device ether type by slave - bonding netdevices are
	 * created with ether_setup, so when the slave type is not ARPHRD_ETHER
	 * there is a need to override some of the type dependent attribs/funcs.
	 *
	 * bond ether type mutual exclusion - don't allow slaves of dissimilar
	 * ether type (e.g. ARPHRD_ETHER and ARPHRD_INFINIBAND) to share the
	 * same bond
	 */
	if (!bond_has_slaves(bond)) {
		if (bond_dev->type != slave_dev->type) {
			slave_dbg(bond_dev, slave_dev, "change device type from %d to %d\n",
				  bond_dev->type, slave_dev->type);

			res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       bond_dev);
			res = notifier_to_errno(res);
			if (res) {
				slave_err(bond_dev, slave_dev, "refused to change device type\n");
				return -EBUSY;
			}

			/* Flush unicast and multicast addresses */
			dev_uc_flush(bond_dev);
			dev_mc_flush(bond_dev);

			if (slave_dev->type != ARPHRD_ETHER)
				bond_setup_by_slave(bond_dev, slave_dev);
			else
				bond_ether_setup(bond_dev);

			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 bond_dev);
		}
	} else if (bond_dev->type != slave_dev->type) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Device type is different from other slaves");
		return -EINVAL;
	}

	if (slave_dev->type == ARPHRD_INFINIBAND &&
	    BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
		SLAVE_NL_ERR(bond_dev, slave_dev, extack,
			     "Only active-backup mode is supported for infiniband slaves");
		res = -EOPNOTSUPP;
		goto err_undo_flags;
	}

	if (!slave_ops->ndo_set_mac_address ||
	    slave_dev->type == ARPHRD_INFINIBAND) {
		slave_warn(bond_dev, slave_dev, "The slave device specified does not support setting the MAC address\n");
		if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
		    bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
			if (!bond_has_slaves(bond)) {
				bond->params.fail_over_mac = BOND_FOM_ACTIVE;
				slave_warn(bond_dev, slave_dev, "Setting fail_over_mac to active for active-backup mode\n");
			} else {
				SLAVE_NL_ERR(bond_dev, slave_dev, extack,
					     "Slave device does not support setting the MAC address, but fail_over_mac is not set to active");
				res = -EOPNOTSUPP;
				goto err_undo_flags;
			}
		}
	}

	call_netdevice_notifiers(NETDEV_JOIN, slave_dev);

	/* If this is the first slave, then we need to set the master's hardware
	 * address to be the same as the slave's.
	 */
	if (!bond_has_slaves(bond) &&
	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
		res = bond_set_dev_addr(bond->dev, slave_dev);
		if (res)
			goto err_undo_flags;
	}

	new_slave = bond_alloc_slave(bond, slave_dev);
	if (!new_slave) {
		res = -ENOMEM;
		goto err_undo_flags;
	}

	/* Set the new_slave's queue_id to be zero. Queue ID mapping
	 * is set via sysfs or module option if desired.
	 */
	new_slave->queue_id = 0;

	/* Save slave's original mtu and then set it to match the bond */
	new_slave->original_mtu = slave_dev->mtu;
	res = dev_set_mtu(slave_dev, bond->dev->mtu);
	if (res) {
		slave_err(bond_dev, slave_dev, "Error %d calling dev_set_mtu\n", res);
1974 goto err_free;
1975 }
1976
1977 /* Save slave's original ("permanent") mac address for modes
1978 * that need it, and for restoring it upon release, and then
1979 * set it to the master's address
1980 */
1981 bond_hw_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr,
1982 slave_dev->addr_len);
1983
1984 if (!bond->params.fail_over_mac ||
1985 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1986 /* Set slave to master's mac address. The application already
1987 * set the master's mac address to that of the first slave
1988 */
1989 memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
1990 ss.ss_family = slave_dev->type;
1991 res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
1992 extack);
1993 if (res) {
1994 slave_err(bond_dev, slave_dev, "Error %d calling set_mac_address\n", res);
1995 goto err_restore_mtu;
1996 }
1997 }
1998
1999 /* set no_addrconf flag before open to prevent IPv6 addrconf */
2000 slave_dev->priv_flags |= IFF_NO_ADDRCONF;
2001
2002 /* open the slave since the application closed it */
2003 res = dev_open(slave_dev, extack);
2004 if (res) {
2005 slave_err(bond_dev, slave_dev, "Opening slave failed\n");
2006 goto err_restore_mac;
2007 }
2008
2009 slave_dev->priv_flags |= IFF_BONDING;
2010 /* initialize slave stats */
2011 dev_get_stats(new_slave->dev, &new_slave->slave_stats);
2012
2013 if (bond_is_lb(bond)) {
2014 /* bond_alb_init_slave() must be called before all other stages since
2015 * it might fail and we do not want to have to undo everything
2016 */
2017 res = bond_alb_init_slave(bond, new_slave);
2018 if (res)
2019 goto err_close;
2020 }
2021
2022 res = vlan_vids_add_by_dev(slave_dev, bond_dev);
2023 if (res) {
2024 slave_err(bond_dev, slave_dev, "Couldn't add bond vlan ids\n");
2025 goto err_close;
2026 }
2027
2028 prev_slave = bond_last_slave(bond);
2029
2030 new_slave->delay = 0;
2031 new_slave->link_failure_count = 0;
2032
2033 if (bond_update_speed_duplex(new_slave) &&
2034 bond_needs_speed_duplex(bond))
2035 new_slave->link = BOND_LINK_DOWN;
2036
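	/* Backdate last_rx (and each per-target timestamp) by a full
	 * arp_interval so the ARP monitor sees the new slave as overdue
	 * for validation rather than as freshly confirmed.
	 */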
2037 new_slave->last_rx = jiffies -
2038 (msecs_to_jiffies(bond->params.arp_interval) + 1);
2039 for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
2040 new_slave->target_last_arp_rx[i] = new_slave->last_rx;
2041
2042 new_slave->last_tx = new_slave->last_rx;
2043
2044 if (bond->params.miimon && !bond->params.use_carrier) {
2045 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
2046
2047 if ((link_reporting == -1) && !bond->params.arp_interval) {
2048 /* miimon is set but a bonded network driver
2049 * does not support ETHTOOL/MII and
2050 * arp_interval is not set. Note: if
2051			 * use_carrier is enabled, we will never get
2052			 * here (because netif_carrier is always
2053 * supported); thus, we don't need to change
2054 * the messages for netif_carrier.
2055 */
2056 slave_warn(bond_dev, slave_dev, "MII and ETHTOOL support not available for slave, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n");
2057 } else if (link_reporting == -1) {
2058			/* unable to get link status using mii/ethtool */
2059 slave_warn(bond_dev, slave_dev, "can't get link status from slave; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n");
2060 }
2061 }
2062
2063 /* check for initial state */
2064 new_slave->link = BOND_LINK_NOCHANGE;
2065 if (bond->params.miimon) {
2066 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
2067 if (bond->params.updelay) {
2068 bond_set_slave_link_state(new_slave,
2069 BOND_LINK_BACK,
2070 BOND_SLAVE_NOTIFY_NOW);
2071 new_slave->delay = bond->params.updelay;
2072 } else {
2073 bond_set_slave_link_state(new_slave,
2074 BOND_LINK_UP,
2075 BOND_SLAVE_NOTIFY_NOW);
2076 }
2077 } else {
2078 bond_set_slave_link_state(new_slave, BOND_LINK_DOWN,
2079 BOND_SLAVE_NOTIFY_NOW);
2080 }
2081 } else if (bond->params.arp_interval) {
2082 bond_set_slave_link_state(new_slave,
2083 (netif_carrier_ok(slave_dev) ?
2084 BOND_LINK_UP : BOND_LINK_DOWN),
2085 BOND_SLAVE_NOTIFY_NOW);
2086 } else {
2087 bond_set_slave_link_state(new_slave, BOND_LINK_UP,
2088 BOND_SLAVE_NOTIFY_NOW);
2089 }
2090
2091 if (new_slave->link != BOND_LINK_DOWN)
2092 new_slave->last_link_up = jiffies;
2093 slave_dbg(bond_dev, slave_dev, "Initial state of slave is BOND_LINK_%s\n",
2094 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
2095 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
2096
2097 if (bond_uses_primary(bond) && bond->params.primary[0]) {
2098 /* if there is a primary slave, remember it */
2099 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
2100 rcu_assign_pointer(bond->primary_slave, new_slave);
2101 bond->force_primary = true;
2102 }
2103 }
2104
2105 switch (BOND_MODE(bond)) {
2106 case BOND_MODE_ACTIVEBACKUP:
2107 bond_set_slave_inactive_flags(new_slave,
2108 BOND_SLAVE_NOTIFY_NOW);
2109 break;
2110 case BOND_MODE_8023AD:
2111 /* in 802.3ad mode, the internal mechanism
2112 * will activate the slaves in the selected
2113 * aggregator
2114 */
2115 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2116 /* if this is the first slave */
2117 if (!prev_slave) {
2118 SLAVE_AD_INFO(new_slave)->id = 1;
2119			/* Initialize AD with the number of times the AD timer fires per
2120			 * second; this can be done only after the bond's mac address is set.
2121			 */
2122 bond_3ad_initialize(bond);
2123 } else {
2124 SLAVE_AD_INFO(new_slave)->id =
2125 SLAVE_AD_INFO(prev_slave)->id + 1;
2126 }
2127
2128 bond_3ad_bind_slave(new_slave);
2129 break;
2130 case BOND_MODE_TLB:
2131 case BOND_MODE_ALB:
2132 bond_set_active_slave(new_slave);
2133 bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
2134 break;
2135 default:
2136 slave_dbg(bond_dev, slave_dev, "This slave is always active in trunk mode\n");
2137
2138 /* always active in trunk mode */
2139 bond_set_active_slave(new_slave);
2140
2141 /* In trunking mode there is little meaning to curr_active_slave
2142 * anyway (it holds no special properties of the bond device),
2143 * so we can change it without calling change_active_interface()
2144 */
2145 if (!rcu_access_pointer(bond->curr_active_slave) &&
2146 new_slave->link == BOND_LINK_UP)
2147 rcu_assign_pointer(bond->curr_active_slave, new_slave);
2148
2149 break;
2150 } /* switch(bond_mode) */
2151
2152#ifdef CONFIG_NET_POLL_CONTROLLER
2153 if (bond->dev->npinfo) {
2154 if (slave_enable_netpoll(new_slave)) {
2155 slave_info(bond_dev, slave_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
2156 res = -EBUSY;
2157 goto err_detach;
2158 }
2159 }
2160#endif
2161
2162 if (!(bond_dev->features & NETIF_F_LRO))
2163 dev_disable_lro(slave_dev);
2164
2165 res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
2166 new_slave);
2167 if (res) {
2168 slave_dbg(bond_dev, slave_dev, "Error %d calling netdev_rx_handler_register\n", res);
2169 goto err_detach;
2170 }
2171
2172 res = bond_master_upper_dev_link(bond, new_slave, extack);
2173 if (res) {
2174 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_master_upper_dev_link\n", res);
2175 goto err_unregister;
2176 }
2177
2178 bond_lower_state_changed(new_slave);
2179
2180 res = bond_sysfs_slave_add(new_slave);
2181 if (res) {
2182 slave_dbg(bond_dev, slave_dev, "Error %d calling bond_sysfs_slave_add\n", res);
2183 goto err_upper_unlink;
2184 }
2185
2186 /* If the mode uses primary, then the following is handled by
2187 * bond_change_active_slave().
2188 */
2189 if (!bond_uses_primary(bond)) {
2190 /* set promiscuity level to new slave */
2191 if (bond_dev->flags & IFF_PROMISC) {
2192 res = dev_set_promiscuity(slave_dev, 1);
2193 if (res)
2194 goto err_sysfs_del;
2195 }
2196
2197 /* set allmulti level to new slave */
2198 if (bond_dev->flags & IFF_ALLMULTI) {
2199 res = dev_set_allmulti(slave_dev, 1);
2200 if (res) {
2201 if (bond_dev->flags & IFF_PROMISC)
2202 dev_set_promiscuity(slave_dev, -1);
2203 goto err_sysfs_del;
2204 }
2205 }
2206
2207 if (bond_dev->flags & IFF_UP) {
2208 netif_addr_lock_bh(bond_dev);
2209 dev_mc_sync_multiple(slave_dev, bond_dev);
2210 dev_uc_sync_multiple(slave_dev, bond_dev);
2211 netif_addr_unlock_bh(bond_dev);
2212
2213 if (BOND_MODE(bond) == BOND_MODE_8023AD)
2214 dev_mc_add(slave_dev, lacpdu_mcast_addr);
2215 }
2216 }
2217
2218 bond->slave_cnt++;
2219 bond_compute_features(bond);
2220 bond_set_carrier(bond);
2221
2222 if (bond_uses_primary(bond)) {
2223 block_netpoll_tx();
2224 bond_select_active_slave(bond);
2225 unblock_netpoll_tx();
2226 }
2227
2228 if (bond_mode_can_use_xmit_hash(bond))
2229 bond_update_slave_arr(bond, NULL);
2230
2231
2232 if (!slave_dev->netdev_ops->ndo_bpf ||
2233 !slave_dev->netdev_ops->ndo_xdp_xmit) {
2234 if (bond->xdp_prog) {
2235 SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2236 "Slave does not support XDP");
2237 res = -EOPNOTSUPP;
2238 goto err_sysfs_del;
2239 }
2240 } else if (bond->xdp_prog) {
2241 struct netdev_bpf xdp = {
2242 .command = XDP_SETUP_PROG,
2243 .flags = 0,
2244 .prog = bond->xdp_prog,
2245 .extack = extack,
2246 };
2247
2248 if (dev_xdp_prog_count(slave_dev) > 0) {
2249 SLAVE_NL_ERR(bond_dev, slave_dev, extack,
2250 "Slave has XDP program loaded, please unload before enslaving");
2251 res = -EOPNOTSUPP;
2252 goto err_sysfs_del;
2253 }
2254
2255 res = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
2256 if (res < 0) {
2257 /* ndo_bpf() sets extack error message */
2258 slave_dbg(bond_dev, slave_dev, "Error %d calling ndo_bpf\n", res);
2259 goto err_sysfs_del;
2260 }
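		/* The program is now attached to the slave as well, so take
		 * an extra reference to keep it alive for the duration of
		 * the enslavement.
		 */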
2261 if (bond->xdp_prog)
2262 bpf_prog_inc(bond->xdp_prog);
2263 }
2264
2265 bond_xdp_set_features(bond_dev);
2266
2267 slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
2268 bond_is_active_slave(new_slave) ? "an active" : "a backup",
2269 new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
2270
2271 /* enslave is successful */
2272 bond_queue_slave_event(new_slave);
2273 return 0;
2274
2275/* Undo stages on error */
2276err_sysfs_del:
2277 bond_sysfs_slave_del(new_slave);
2278
2279err_upper_unlink:
2280 bond_upper_dev_unlink(bond, new_slave);
2281
2282err_unregister:
2283 netdev_rx_handler_unregister(slave_dev);
2284
2285err_detach:
2286 vlan_vids_del_by_dev(slave_dev, bond_dev);
2287 if (rcu_access_pointer(bond->primary_slave) == new_slave)
2288 RCU_INIT_POINTER(bond->primary_slave, NULL);
2289 if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
2290 block_netpoll_tx();
2291 bond_change_active_slave(bond, NULL);
2292 bond_select_active_slave(bond);
2293 unblock_netpoll_tx();
2294 }
2295 /* either primary_slave or curr_active_slave might've changed */
2296 synchronize_rcu();
2297 slave_disable_netpoll(new_slave);
2298
2299err_close:
2300 if (!netif_is_bond_master(slave_dev))
2301 slave_dev->priv_flags &= ~IFF_BONDING;
2302 dev_close(slave_dev);
2303
2304err_restore_mac:
2305 slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
2306 if (!bond->params.fail_over_mac ||
2307 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2308 /* XXX TODO - fom follow mode needs to change master's
2309 * MAC if this slave's MAC is in use by the bond, or at
2310 * least print a warning.
2311 */
2312 bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
2313 new_slave->dev->addr_len);
2314 ss.ss_family = slave_dev->type;
2315 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2316 }
2317
2318err_restore_mtu:
2319 dev_set_mtu(slave_dev, new_slave->original_mtu);
2320
2321err_free:
2322 kobject_put(&new_slave->kobj);
2323
2324err_undo_flags:
2325 /* Enslave of first slave has failed and we need to fix master's mac */
2326 if (!bond_has_slaves(bond)) {
2327 if (ether_addr_equal_64bits(bond_dev->dev_addr,
2328 slave_dev->dev_addr))
2329 eth_hw_addr_random(bond_dev);
2330 if (bond_dev->type != ARPHRD_ETHER) {
2331 dev_close(bond_dev);
2332 bond_ether_setup(bond_dev);
2333 }
2334 }
2335
2336 return res;
2337}
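
/* Userspace typically drives enslave/release through netlink (illustrative
 * iproute2 commands):
 *
 *	ip link set dev eth0 master bond0	# bond_enslave()
 *	ip link set dev eth0 nomaster		# bond_release()
 *
 * The legacy ifenslave(8) binary reaches the same paths via ioctl.
 */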
2338
2339/* Try to release the slave device <slave> from the bond device <master>.
2340 * It is legal to access curr_active_slave without a lock because the entire
2341 * function is RTNL-locked. If "all" is true, the function is being called
2342 * while destroying a bond interface and all slaves are being released.
2343 *
2344 * The rules for slave state should be:
2345 *   for Active/Backup:
2346 *     the active slave stays up, all backups go down
2347 *   for bonded connections:
2348 *     the first up interface should be left up and all others taken down.
2349 */
2350static int __bond_release_one(struct net_device *bond_dev,
2351 struct net_device *slave_dev,
2352 bool all, bool unregister)
2353{
2354 struct bonding *bond = netdev_priv(bond_dev);
2355 struct slave *slave, *oldcurrent;
2356 struct sockaddr_storage ss;
2357 int old_flags = bond_dev->flags;
2358 netdev_features_t old_features = bond_dev->features;
2359
2360 /* slave is not a slave or master is not master of this slave */
2361 if (!(slave_dev->flags & IFF_SLAVE) ||
2362 !netdev_has_upper_dev(slave_dev, bond_dev)) {
2363 slave_dbg(bond_dev, slave_dev, "cannot release slave\n");
2364 return -EINVAL;
2365 }
2366
2367 block_netpoll_tx();
2368
2369 slave = bond_get_slave_by_dev(bond, slave_dev);
2370 if (!slave) {
2371 /* not a slave of this bond */
2372 slave_info(bond_dev, slave_dev, "interface not enslaved\n");
2373 unblock_netpoll_tx();
2374 return -EINVAL;
2375 }
2376
2377 bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
2378
2379 bond_sysfs_slave_del(slave);
2380
2381 /* recompute stats just before removing the slave */
2382 bond_get_stats(bond->dev, &bond->bond_stats);
2383
2384 if (bond->xdp_prog) {
2385 struct netdev_bpf xdp = {
2386 .command = XDP_SETUP_PROG,
2387 .flags = 0,
2388 .prog = NULL,
2389 .extack = NULL,
2390 };
2391 if (slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp))
2392 slave_warn(bond_dev, slave_dev, "failed to unload XDP program\n");
2393 }
2394
2395 /* unregister rx_handler early so bond_handle_frame wouldn't be called
2396 * for this slave anymore.
2397 */
2398 netdev_rx_handler_unregister(slave_dev);
2399
2400 if (BOND_MODE(bond) == BOND_MODE_8023AD)
2401 bond_3ad_unbind_slave(slave);
2402
2403 bond_upper_dev_unlink(bond, slave);
2404
2405 if (bond_mode_can_use_xmit_hash(bond))
2406 bond_update_slave_arr(bond, slave);
2407
2408 slave_info(bond_dev, slave_dev, "Releasing %s interface\n",
2409 bond_is_active_slave(slave) ? "active" : "backup");
2410
2411 oldcurrent = rcu_access_pointer(bond->curr_active_slave);
2412
2413 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
2414
2415 if (!all && (!bond->params.fail_over_mac ||
2416 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
2417 if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
2418 bond_has_slaves(bond))
2419 slave_warn(bond_dev, slave_dev, "the permanent HWaddr of slave - %pM - is still in use by bond - set the HWaddr of slave to a different address to avoid conflicts\n",
2420 slave->perm_hwaddr);
2421 }
2422
2423 if (rtnl_dereference(bond->primary_slave) == slave)
2424 RCU_INIT_POINTER(bond->primary_slave, NULL);
2425
2426 if (oldcurrent == slave)
2427 bond_change_active_slave(bond, NULL);
2428
2429 if (bond_is_lb(bond)) {
2430 /* Must be called only after the slave has been
2431 * detached from the list and the curr_active_slave
2432 * has been cleared (if our_slave == old_current),
2433 * but before a new active slave is selected.
2434 */
2435 bond_alb_deinit_slave(bond, slave);
2436 }
2437
2438 if (all) {
2439 RCU_INIT_POINTER(bond->curr_active_slave, NULL);
2440 } else if (oldcurrent == slave) {
2441 /* Note that we hold RTNL over this sequence, so there
2442 * is no concern that another slave add/remove event
2443 * will interfere.
2444 */
2445 bond_select_active_slave(bond);
2446 }
2447
2448 bond_set_carrier(bond);
2449 if (!bond_has_slaves(bond))
2450 eth_hw_addr_random(bond_dev);
2451
2452 unblock_netpoll_tx();
2453 synchronize_rcu();
2454 bond->slave_cnt--;
2455
2456 if (!bond_has_slaves(bond)) {
2457 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
2458 call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
2459 }
2460
2461 bond_compute_features(bond);
2462 if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
2463 (old_features & NETIF_F_VLAN_CHALLENGED))
2464 slave_info(bond_dev, slave_dev, "last VLAN challenged slave left bond - VLAN blocking is removed\n");
2465
2466 vlan_vids_del_by_dev(slave_dev, bond_dev);
2467
2468 /* If the mode uses primary, then this case was handled above by
2469 * bond_change_active_slave(..., NULL)
2470 */
2471 if (!bond_uses_primary(bond)) {
2472		/* unset promiscuity level from slave
2473		 * NOTE: The NETDEV_CHANGEADDR call above may change the value
2474		 * of the IFF_PROMISC flag in the bond_dev, but we need the
2475		 * value of that flag before that change, as that was its value
2476		 * when this slave was attached, so we cached it at the start of
2477		 * the function and use it here. The same goes for ALLMULTI below.
2478		 */
2479 if (old_flags & IFF_PROMISC)
2480 dev_set_promiscuity(slave_dev, -1);
2481
2482 /* unset allmulti level from slave */
2483 if (old_flags & IFF_ALLMULTI)
2484 dev_set_allmulti(slave_dev, -1);
2485
2486 if (old_flags & IFF_UP)
2487 bond_hw_addr_flush(bond_dev, slave_dev);
2488 }
2489
2490 slave_disable_netpoll(slave);
2491
2492 /* close slave before restoring its mac address */
2493 dev_close(slave_dev);
2494
2495 slave_dev->priv_flags &= ~IFF_NO_ADDRCONF;
2496
2497 if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
2498 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2499 /* restore original ("permanent") mac address */
2500 bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
2501 slave->dev->addr_len);
2502 ss.ss_family = slave_dev->type;
2503 dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
2504 }
2505
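	/* __dev_set_mtu() changes the MTU without firing the
	 * NETDEV_CHANGEMTU notifiers, which is preferable while the slave
	 * is being unregistered; the normal release path uses dev_set_mtu().
	 */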
2506 if (unregister)
2507 __dev_set_mtu(slave_dev, slave->original_mtu);
2508 else
2509 dev_set_mtu(slave_dev, slave->original_mtu);
2510
2511 if (!netif_is_bond_master(slave_dev))
2512 slave_dev->priv_flags &= ~IFF_BONDING;
2513
2514 bond_xdp_set_features(bond_dev);
2515 kobject_put(&slave->kobj);
2516
2517 return 0;
2518}
2519
2520/* A wrapper used because of ndo_del_link */
2521int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
2522{
2523 return __bond_release_one(bond_dev, slave_dev, false, false);
2524}
2525
2526/* First release a slave and then destroy the bond if no more slaves are left.
2527 * Must be under rtnl_lock when this function is called.
2528 */
2529static int bond_release_and_destroy(struct net_device *bond_dev,
2530 struct net_device *slave_dev)
2531{
2532 struct bonding *bond = netdev_priv(bond_dev);
2533 int ret;
2534
2535 ret = __bond_release_one(bond_dev, slave_dev, false, true);
2536 if (ret == 0 && !bond_has_slaves(bond) &&
2537 bond_dev->reg_state != NETREG_UNREGISTERING) {
2538 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
2539 netdev_info(bond_dev, "Destroying bond\n");
2540 bond_remove_proc_entry(bond);
2541 unregister_netdevice(bond_dev);
2542 }
2543 return ret;
2544}
2545
2546static void bond_info_query(struct net_device *bond_dev, struct ifbond *info)
2547{
2548 struct bonding *bond = netdev_priv(bond_dev);
2549
2550 bond_fill_ifbond(bond, info);
2551}
2552
2553static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
2554{
2555 struct bonding *bond = netdev_priv(bond_dev);
2556 struct list_head *iter;
2557 int i = 0, res = -ENODEV;
2558 struct slave *slave;
2559
2560 bond_for_each_slave(bond, slave, iter) {
2561 if (i++ == (int)info->slave_id) {
2562 res = 0;
2563 bond_fill_ifslave(slave, info);
2564 break;
2565 }
2566 }
2567
2568 return res;
2569}
2570
2571/*-------------------------------- Monitoring -------------------------------*/
2572
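/* Summary of the miimon link state machine driven by the inspect/commit
 * pair below (a rough sketch, states from bond_set_slave_link_state()):
 *
 *	UP   --carrier lost--> FAIL --downdelay expired--> DOWN
 *	DOWN --carrier back--> BACK --updelay expired-->   UP
 *
 * FAIL and BACK are transient states implementing downdelay/updelay
 * debouncing: a link that recovers (or relapses) before its delay expires
 * goes back to UP (or DOWN) without ever being disabled (or enabled).
 */
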
2573/* called with rcu_read_lock() */
2574static int bond_miimon_inspect(struct bonding *bond)
2575{
2576 bool ignore_updelay = false;
2577 int link_state, commit = 0;
2578 struct list_head *iter;
2579 struct slave *slave;
2580
2581 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
2582 ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2583 } else {
2584 struct bond_up_slave *usable_slaves;
2585
2586 usable_slaves = rcu_dereference(bond->usable_slaves);
2587
2588 if (usable_slaves && usable_slaves->count == 0)
2589 ignore_updelay = true;
2590 }
2591
2592 bond_for_each_slave_rcu(bond, slave, iter) {
2593 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2594
2595 link_state = bond_check_dev_link(bond, slave->dev, 0);
2596
2597 switch (slave->link) {
2598 case BOND_LINK_UP:
2599 if (link_state)
2600 continue;
2601
2602 bond_propose_link_state(slave, BOND_LINK_FAIL);
2603 commit++;
2604 slave->delay = bond->params.downdelay;
2605 if (slave->delay) {
2606 slave_info(bond->dev, slave->dev, "link status down for %sinterface, disabling it in %d ms\n",
2607 (BOND_MODE(bond) ==
2608 BOND_MODE_ACTIVEBACKUP) ?
2609 (bond_is_active_slave(slave) ?
2610 "active " : "backup ") : "",
2611 bond->params.downdelay * bond->params.miimon);
2612 }
2613 fallthrough;
2614 case BOND_LINK_FAIL:
2615 if (link_state) {
2616 /* recovered before downdelay expired */
2617 bond_propose_link_state(slave, BOND_LINK_UP);
2618 slave->last_link_up = jiffies;
2619 slave_info(bond->dev, slave->dev, "link status up again after %d ms\n",
2620 (bond->params.downdelay - slave->delay) *
2621 bond->params.miimon);
2622 commit++;
2623 continue;
2624 }
2625
2626 if (slave->delay <= 0) {
2627 bond_propose_link_state(slave, BOND_LINK_DOWN);
2628 commit++;
2629 continue;
2630 }
2631
2632 slave->delay--;
2633 break;
2634
2635 case BOND_LINK_DOWN:
2636 if (!link_state)
2637 continue;
2638
2639 bond_propose_link_state(slave, BOND_LINK_BACK);
2640 commit++;
2641 slave->delay = bond->params.updelay;
2642
2643 if (slave->delay) {
2644 slave_info(bond->dev, slave->dev, "link status up, enabling it in %d ms\n",
2645 ignore_updelay ? 0 :
2646 bond->params.updelay *
2647 bond->params.miimon);
2648 }
2649 fallthrough;
2650 case BOND_LINK_BACK:
2651 if (!link_state) {
2652 bond_propose_link_state(slave, BOND_LINK_DOWN);
2653 slave_info(bond->dev, slave->dev, "link status down again after %d ms\n",
2654 (bond->params.updelay - slave->delay) *
2655 bond->params.miimon);
2656 commit++;
2657 continue;
2658 }
2659
2660 if (ignore_updelay)
2661 slave->delay = 0;
2662
2663 if (slave->delay <= 0) {
2664 bond_propose_link_state(slave, BOND_LINK_UP);
2665 commit++;
2666 ignore_updelay = false;
2667 continue;
2668 }
2669
2670 slave->delay--;
2671 break;
2672 }
2673 }
2674
2675 return commit;
2676}
2677
2678static void bond_miimon_link_change(struct bonding *bond,
2679 struct slave *slave,
2680 char link)
2681{
2682 switch (BOND_MODE(bond)) {
2683 case BOND_MODE_8023AD:
2684 bond_3ad_handle_link_change(slave, link);
2685 break;
2686 case BOND_MODE_TLB:
2687 case BOND_MODE_ALB:
2688 bond_alb_handle_link_change(bond, slave, link);
2689 break;
2690 case BOND_MODE_XOR:
2691 bond_update_slave_arr(bond, NULL);
2692 break;
2693 }
2694}
2695
2696static void bond_miimon_commit(struct bonding *bond)
2697{
2698 struct slave *slave, *primary, *active;
2699 bool do_failover = false;
2700 struct list_head *iter;
2701
2702 ASSERT_RTNL();
2703
2704 bond_for_each_slave(bond, slave, iter) {
2705 switch (slave->link_new_state) {
2706 case BOND_LINK_NOCHANGE:
2707 /* For 802.3ad mode, check current slave speed and
2708 * duplex again in case its port was disabled after
2709 * invalid speed/duplex reporting but recovered before
2710 * link monitoring could make a decision on the actual
2711 * link status
2712 */
2713 if (BOND_MODE(bond) == BOND_MODE_8023AD &&
2714 slave->link == BOND_LINK_UP)
2715 bond_3ad_adapter_speed_duplex_changed(slave);
2716 continue;
2717
2718 case BOND_LINK_UP:
2719 if (bond_update_speed_duplex(slave) &&
2720 bond_needs_speed_duplex(bond)) {
2721 slave->link = BOND_LINK_DOWN;
2722 if (net_ratelimit())
2723 slave_warn(bond->dev, slave->dev,
2724 "failed to get link speed/duplex\n");
2725 continue;
2726 }
2727 bond_set_slave_link_state(slave, BOND_LINK_UP,
2728 BOND_SLAVE_NOTIFY_NOW);
2729 slave->last_link_up = jiffies;
2730
2731 primary = rtnl_dereference(bond->primary_slave);
2732 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
2733 /* prevent it from being the active one */
2734 bond_set_backup_slave(slave);
2735 } else if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
2736 /* make it immediately active */
2737 bond_set_active_slave(slave);
2738 }
2739
2740 slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
2741 slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
2742 slave->duplex ? "full" : "half");
2743
2744 bond_miimon_link_change(bond, slave, BOND_LINK_UP);
2745
2746 active = rtnl_dereference(bond->curr_active_slave);
2747 if (!active || slave == primary || slave->prio > active->prio)
2748 do_failover = true;
2749
2750 continue;
2751
2752 case BOND_LINK_DOWN:
2753 if (slave->link_failure_count < UINT_MAX)
2754 slave->link_failure_count++;
2755
2756 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
2757 BOND_SLAVE_NOTIFY_NOW);
2758
2759 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP ||
2760 BOND_MODE(bond) == BOND_MODE_8023AD)
2761 bond_set_slave_inactive_flags(slave,
2762 BOND_SLAVE_NOTIFY_NOW);
2763
2764 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
2765
2766 bond_miimon_link_change(bond, slave, BOND_LINK_DOWN);
2767
2768 if (slave == rcu_access_pointer(bond->curr_active_slave))
2769 do_failover = true;
2770
2771 continue;
2772
2773 default:
2774 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2775 slave->link_new_state);
2776 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2777
2778 continue;
2779 }
2780 }
2781
2782 if (do_failover) {
2783 block_netpoll_tx();
2784 bond_select_active_slave(bond);
2785 unblock_netpoll_tx();
2786 }
2787
2788 bond_set_carrier(bond);
2789}
2790
2791/* bond_mii_monitor
2792 *
2793 * Really a wrapper that splits the mii monitor into two phases: an
2794 * inspection, then (if inspection indicates something needs to be done)
2795 * an acquisition of appropriate locks followed by a commit phase to
2796 * implement whatever link state changes are indicated.
2797 */
2798static void bond_mii_monitor(struct work_struct *work)
2799{
2800 struct bonding *bond = container_of(work, struct bonding,
2801 mii_work.work);
2802 bool should_notify_peers = false;
2803 bool commit;
2804 unsigned long delay;
2805 struct slave *slave;
2806 struct list_head *iter;
2807
2808 delay = msecs_to_jiffies(bond->params.miimon);
2809
2810 if (!bond_has_slaves(bond))
2811 goto re_arm;
2812
2813 rcu_read_lock();
2814 should_notify_peers = bond_should_notify_peers(bond);
2815 commit = !!bond_miimon_inspect(bond);
2816 if (bond->send_peer_notif) {
2817 rcu_read_unlock();
2818 if (rtnl_trylock()) {
2819 bond->send_peer_notif--;
2820 rtnl_unlock();
2821 }
2822 } else {
2823 rcu_read_unlock();
2824 }
2825
2826 if (commit) {
2827 /* Race avoidance with bond_close cancel of workqueue */
2828 if (!rtnl_trylock()) {
2829 delay = 1;
2830 should_notify_peers = false;
2831 goto re_arm;
2832 }
2833
2834 bond_for_each_slave(bond, slave, iter) {
2835 bond_commit_link_state(slave, BOND_SLAVE_NOTIFY_LATER);
2836 }
2837 bond_miimon_commit(bond);
2838
2839 rtnl_unlock(); /* might sleep, hold no other locks */
2840 }
2841
2842re_arm:
2843 if (bond->params.miimon)
2844 queue_delayed_work(bond->wq, &bond->mii_work, delay);
2845
2846 if (should_notify_peers) {
2847 if (!rtnl_trylock())
2848 return;
2849 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
2850 rtnl_unlock();
2851 }
2852}
2853
2854static int bond_upper_dev_walk(struct net_device *upper,
2855 struct netdev_nested_priv *priv)
2856{
2857 __be32 ip = *(__be32 *)priv->data;
2858
2859 return ip == bond_confirm_addr(upper, 0, ip);
2860}
2861
2862static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
2863{
2864 struct netdev_nested_priv priv = {
2865 .data = (void *)&ip,
2866 };
2867 bool ret = false;
2868
2869 if (ip == bond_confirm_addr(bond->dev, 0, ip))
2870 return true;
2871
2872 rcu_read_lock();
2873 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_upper_dev_walk, &priv))
2874 ret = true;
2875 rcu_read_unlock();
2876
2877 return ret;
2878}
2879
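/* Sentinel vlan_proto value terminating a bond_vlan_tag array; 0xffff is
 * not a valid VLAN ethertype, so it cannot collide with a real tag.
 */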
2880#define BOND_VLAN_PROTO_NONE cpu_to_be16(0xffff)
2881
2882static bool bond_handle_vlan(struct slave *slave, struct bond_vlan_tag *tags,
2883 struct sk_buff *skb)
2884{
2885 struct net_device *bond_dev = slave->bond->dev;
2886 struct net_device *slave_dev = slave->dev;
2887 struct bond_vlan_tag *outer_tag = tags;
2888
2889 if (!tags || tags->vlan_proto == BOND_VLAN_PROTO_NONE)
2890 return true;
2891
2892 tags++;
2893
2894 /* Go through all the tags backwards and add them to the packet */
2895 while (tags->vlan_proto != BOND_VLAN_PROTO_NONE) {
2896 if (!tags->vlan_id) {
2897 tags++;
2898 continue;
2899 }
2900
2901 slave_dbg(bond_dev, slave_dev, "inner tag: proto %X vid %X\n",
2902 ntohs(outer_tag->vlan_proto), tags->vlan_id);
2903 skb = vlan_insert_tag_set_proto(skb, tags->vlan_proto,
2904 tags->vlan_id);
2905 if (!skb) {
2906 net_err_ratelimited("failed to insert inner VLAN tag\n");
2907 return false;
2908 }
2909
2910 tags++;
2911 }
2912 /* Set the outer tag */
2913 if (outer_tag->vlan_id) {
2914 slave_dbg(bond_dev, slave_dev, "outer tag: proto %X vid %X\n",
2915 ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
2916 __vlan_hwaccel_put_tag(skb, outer_tag->vlan_proto,
2917 outer_tag->vlan_id);
2918 }
2919
2920 return true;
2921}
2922
2923/* We go to the (large) trouble of VLAN tagging ARP frames because
2924 * switches in VLAN mode (especially if ports are configured as
2925 * "native" to a VLAN) might not pass non-tagged frames.
2926 */
2927static void bond_arp_send(struct slave *slave, int arp_op, __be32 dest_ip,
2928 __be32 src_ip, struct bond_vlan_tag *tags)
2929{
2930 struct net_device *bond_dev = slave->bond->dev;
2931 struct net_device *slave_dev = slave->dev;
2932 struct sk_buff *skb;
2933
2934 slave_dbg(bond_dev, slave_dev, "arp %d on slave: dst %pI4 src %pI4\n",
2935 arp_op, &dest_ip, &src_ip);
2936
2937 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2938 NULL, slave_dev->dev_addr, NULL);
2939
2940 if (!skb) {
2941 net_err_ratelimited("ARP packet allocation failed\n");
2942 return;
2943 }
2944
2945 if (bond_handle_vlan(slave, tags, skb)) {
2946 slave_update_last_tx(slave);
2947 arp_xmit(skb);
2948 }
2949
2950 return;
2951}
2952
2953/* Validate the device path between the @start_dev and the @end_dev.
2954 * The path is valid if the @end_dev is reachable through device
2955 * stacking.
2956 * When the path is validated, collect any vlan information in the
2957 * path.
2958 */
2959struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
2960 struct net_device *end_dev,
2961 int level)
2962{
2963 struct bond_vlan_tag *tags;
2964 struct net_device *upper;
2965 struct list_head *iter;
2966
2967 if (start_dev == end_dev) {
2968 tags = kcalloc(level + 1, sizeof(*tags), GFP_ATOMIC);
2969 if (!tags)
2970 return ERR_PTR(-ENOMEM);
2971 tags[level].vlan_proto = BOND_VLAN_PROTO_NONE;
2972 return tags;
2973 }
2974
2975 netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
2976 tags = bond_verify_device_path(upper, end_dev, level + 1);
2977 if (IS_ERR_OR_NULL(tags)) {
2978 if (IS_ERR(tags))
2979 return tags;
2980 continue;
2981 }
2982 if (is_vlan_dev(upper)) {
2983 tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
2984 tags[level].vlan_id = vlan_dev_vlan_id(upper);
2985 }
2986
2987 return tags;
2988 }
2989
2990 return NULL;
2991}
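
/* Illustrative example: with vlan100 stacked on vlan10 stacked on the bond,
 * bond_verify_device_path(bond->dev, vlan100, 0) is expected to return
 *
 *	{ { ETH_P_8021Q, 10 }, { ETH_P_8021Q, 100 }, { BOND_VLAN_PROTO_NONE } }
 *
 * i.e. tags[0] holds the tag closest to the bond (the outermost tag on the
 * wire) and the array is terminated by BOND_VLAN_PROTO_NONE.
 */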
2992
2993static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2994{
2995 struct rtable *rt;
2996 struct bond_vlan_tag *tags;
2997 __be32 *targets = bond->params.arp_targets, addr;
2998 int i;
2999
3000 for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
3001 slave_dbg(bond->dev, slave->dev, "%s: target %pI4\n",
3002 __func__, &targets[i]);
3003 tags = NULL;
3004
3005 /* Find out through which dev should the packet go */
3006 rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
3007 RTO_ONLINK, 0);
3008 if (IS_ERR(rt)) {
3009 /* there's no route to target - try to send arp
3010 * probe to generate any traffic (arp_validate=0)
3011 */
3012 if (bond->params.arp_validate)
3013 pr_warn_once("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
3014 bond->dev->name,
3015 &targets[i]);
3016 bond_arp_send(slave, ARPOP_REQUEST, targets[i],
3017 0, tags);
3018 continue;
3019 }
3020
3021 /* bond device itself */
3022 if (rt->dst.dev == bond->dev)
3023 goto found;
3024
3025 rcu_read_lock();
3026 tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
3027 rcu_read_unlock();
3028
3029 if (!IS_ERR_OR_NULL(tags))
3030 goto found;
3031
3032 /* Not our device - skip */
3033 slave_dbg(bond->dev, slave->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
3034 &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
3035
3036 ip_rt_put(rt);
3037 continue;
3038
3039found:
3040 addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
3041 ip_rt_put(rt);
3042 bond_arp_send(slave, ARPOP_REQUEST, targets[i], addr, tags);
3043 kfree(tags);
3044 }
3045}
3046
3047static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 sip, __be32 tip)
3048{
3049 int i;
3050
3051 if (!sip || !bond_has_this_ip(bond, tip)) {
3052 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 tip %pI4 not found\n",
3053 __func__, &sip, &tip);
3054 return;
3055 }
3056
3057 i = bond_get_targets_ip(bond->params.arp_targets, sip);
3058 if (i == -1) {
3059 slave_dbg(bond->dev, slave->dev, "%s: sip %pI4 not found in targets\n",
3060 __func__, &sip);
3061 return;
3062 }
3063 slave->last_rx = jiffies;
3064 slave->target_last_arp_rx[i] = jiffies;
3065}
3066
3067static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
3068 struct slave *slave)
3069{
3070 struct arphdr *arp = (struct arphdr *)skb->data;
3071 struct slave *curr_active_slave, *curr_arp_slave;
3072 unsigned char *arp_ptr;
3073 __be32 sip, tip;
3074 unsigned int alen;
3075
3076 alen = arp_hdr_len(bond->dev);
3077
3078 if (alen > skb_headlen(skb)) {
3079 arp = kmalloc(alen, GFP_ATOMIC);
3080 if (!arp)
3081 goto out_unlock;
3082 if (skb_copy_bits(skb, 0, arp, alen) < 0)
3083 goto out_unlock;
3084 }
3085
3086 if (arp->ar_hln != bond->dev->addr_len ||
3087 skb->pkt_type == PACKET_OTHERHOST ||
3088 skb->pkt_type == PACKET_LOOPBACK ||
3089 arp->ar_hrd != htons(ARPHRD_ETHER) ||
3090 arp->ar_pro != htons(ETH_P_IP) ||
3091 arp->ar_pln != 4)
3092 goto out_unlock;
3093
3094 arp_ptr = (unsigned char *)(arp + 1);
3095 arp_ptr += bond->dev->addr_len;
3096 memcpy(&sip, arp_ptr, 4);
3097 arp_ptr += 4 + bond->dev->addr_len;
3098 memcpy(&tip, arp_ptr, 4);
3099
3100 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
3101 __func__, slave->dev->name, bond_slave_state(slave),
3102 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3103 &sip, &tip);
3104
3105 curr_active_slave = rcu_dereference(bond->curr_active_slave);
3106 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3107
3108 /* We 'trust' the received ARP enough to validate it if:
3109 *
3110 * (a) the slave receiving the ARP is active (which includes the
3111 * current ARP slave, if any), or
3112 *
3113 * (b) the receiving slave isn't active, but there is a currently
3114 * active slave and it received valid arp reply(s) after it became
3115 * the currently active slave, or
3116 *
3117 * (c) there is an ARP slave that sent an ARP during the prior ARP
3118 * interval, and we receive an ARP reply on any slave. We accept
3119 * these because switch FDB update delays may deliver the ARP
3120 * reply to a slave other than the sender of the ARP request.
3121 *
3122 * Note: for (b), backup slaves are receiving the broadcast ARP
3123 * request, not a reply. This request passes from the sending
3124 * slave through the L2 switch(es) to the receiving slave. Since
3125 * this is checking the request, sip/tip are swapped for
3126 * validation.
3127 *
3128 * This is done to avoid endless looping when we can't reach the
3129 * arp_ip_target and fool ourselves with our own arp requests.
3130 */
3131 if (bond_is_active_slave(slave))
3132 bond_validate_arp(bond, slave, sip, tip);
3133 else if (curr_active_slave &&
3134 time_after(slave_last_rx(bond, curr_active_slave),
3135 curr_active_slave->last_link_up))
3136 bond_validate_arp(bond, slave, tip, sip);
3137 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
3138 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3139 bond_validate_arp(bond, slave, sip, tip);
3140
3141out_unlock:
3142 if (arp != (struct arphdr *)skb->data)
3143 kfree(arp);
3144 return RX_HANDLER_ANOTHER;
3145}
3146
3147#if IS_ENABLED(CONFIG_IPV6)
3148static void bond_ns_send(struct slave *slave, const struct in6_addr *daddr,
3149 const struct in6_addr *saddr, struct bond_vlan_tag *tags)
3150{
3151 struct net_device *bond_dev = slave->bond->dev;
3152 struct net_device *slave_dev = slave->dev;
3153 struct in6_addr mcaddr;
3154 struct sk_buff *skb;
3155
3156 slave_dbg(bond_dev, slave_dev, "NS on slave: dst %pI6c src %pI6c\n",
3157 daddr, saddr);
3158
3159 skb = ndisc_ns_create(slave_dev, daddr, saddr, 0);
3160 if (!skb) {
3161 net_err_ratelimited("NS packet allocation failed\n");
3162 return;
3163 }
3164
3165 addrconf_addr_solict_mult(daddr, &mcaddr);
3166 if (bond_handle_vlan(slave, tags, skb)) {
3167 slave_update_last_tx(slave);
3168 ndisc_send_skb(skb, &mcaddr, saddr);
3169 }
3170}
3171
3172static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
3173{
3174 struct in6_addr *targets = bond->params.ns_targets;
3175 struct bond_vlan_tag *tags;
3176 struct dst_entry *dst;
3177 struct in6_addr saddr;
3178 struct flowi6 fl6;
3179 int i;
3180
3181 for (i = 0; i < BOND_MAX_NS_TARGETS && !ipv6_addr_any(&targets[i]); i++) {
3182 slave_dbg(bond->dev, slave->dev, "%s: target %pI6c\n",
3183 __func__, &targets[i]);
3184 tags = NULL;
3185
3186 /* Find out through which dev should the packet go */
3187 memset(&fl6, 0, sizeof(struct flowi6));
3188 fl6.daddr = targets[i];
3189 fl6.flowi6_oif = bond->dev->ifindex;
3190
3191 dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
3192 if (dst->error) {
3193 dst_release(dst);
3194			/* there's no route to target - try to send an NS
3195			 * probe to generate any traffic (arp_validate=0)
3196			 */
3197 if (bond->params.arp_validate)
3198 pr_warn_once("%s: no route to ns_ip6_target %pI6c and arp_validate is set\n",
3199 bond->dev->name,
3200 &targets[i]);
3201 bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3202 continue;
3203 }
3204
3205 /* bond device itself */
3206 if (dst->dev == bond->dev)
3207 goto found;
3208
3209 rcu_read_lock();
3210 tags = bond_verify_device_path(bond->dev, dst->dev, 0);
3211 rcu_read_unlock();
3212
3213 if (!IS_ERR_OR_NULL(tags))
3214 goto found;
3215
3216 /* Not our device - skip */
3217 slave_dbg(bond->dev, slave->dev, "no path to ns_ip6_target %pI6c via dst->dev %s\n",
3218 &targets[i], dst->dev ? dst->dev->name : "NULL");
3219
3220 dst_release(dst);
3221 continue;
3222
3223found:
3224 if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
3225 bond_ns_send(slave, &targets[i], &saddr, tags);
3226 else
3227 bond_ns_send(slave, &targets[i], &in6addr_any, tags);
3228
3229 dst_release(dst);
3230 kfree(tags);
3231 }
3232}
3233
3234static int bond_confirm_addr6(struct net_device *dev,
3235 struct netdev_nested_priv *priv)
3236{
3237 struct in6_addr *addr = (struct in6_addr *)priv->data;
3238
3239 return ipv6_chk_addr(dev_net(dev), addr, dev, 0);
3240}
3241
3242static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
3243{
3244 struct netdev_nested_priv priv = {
3245 .data = addr,
3246 };
3247	bool ret = false;
3248
3249 if (bond_confirm_addr6(bond->dev, &priv))
3250 return true;
3251
3252 rcu_read_lock();
3253 if (netdev_walk_all_upper_dev_rcu(bond->dev, bond_confirm_addr6, &priv))
3254 ret = true;
3255 rcu_read_unlock();
3256
3257 return ret;
3258}
3259
3260static void bond_validate_na(struct bonding *bond, struct slave *slave,
3261 struct in6_addr *saddr, struct in6_addr *daddr)
3262{
3263 int i;
3264
3265	/* Ignore NAs whose:
3266	 * 1. source address is the unspecified address, or
3267	 * 2. destination address is neither the all-nodes multicast
3268	 *    address nor an address that exists on the bond interface.
3269	 */
3270 if (ipv6_addr_any(saddr) ||
3271 (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
3272 !bond_has_this_ip6(bond, daddr))) {
3273 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
3274 __func__, saddr, daddr);
3275 return;
3276 }
3277
3278 i = bond_get_targets_ip6(bond->params.ns_targets, saddr);
3279 if (i == -1) {
3280 slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c not found in targets\n",
3281 __func__, saddr);
3282 return;
3283 }
3284 slave->last_rx = jiffies;
3285 slave->target_last_arp_rx[i] = jiffies;
3286}
3287
3288static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
3289 struct slave *slave)
3290{
3291 struct slave *curr_active_slave, *curr_arp_slave;
3292 struct in6_addr *saddr, *daddr;
3293 struct {
3294 struct ipv6hdr ip6;
3295 struct icmp6hdr icmp6;
3296 } *combined, _combined;
3297
3298 if (skb->pkt_type == PACKET_OTHERHOST ||
3299 skb->pkt_type == PACKET_LOOPBACK)
3300 goto out;
3301
3302 combined = skb_header_pointer(skb, 0, sizeof(_combined), &_combined);
3303 if (!combined || combined->ip6.nexthdr != NEXTHDR_ICMP ||
3304 (combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION &&
3305 combined->icmp6.icmp6_type != NDISC_NEIGHBOUR_ADVERTISEMENT))
3306 goto out;
3307
3308 saddr = &combined->ip6.saddr;
3309 daddr = &combined->ip6.daddr;
3310
3311 slave_dbg(bond->dev, slave->dev, "%s: %s/%d av %d sv %d sip %pI6c tip %pI6c\n",
3312 __func__, slave->dev->name, bond_slave_state(slave),
3313 bond->params.arp_validate, slave_do_arp_validate(bond, slave),
3314 saddr, daddr);
3315
3316 curr_active_slave = rcu_dereference(bond->curr_active_slave);
3317 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
3318
3319	/* We 'trust' the received NA enough to validate it under the same
3320	 * rules as ARP; see bond_arp_rcv().
3321	 */
3322 if (bond_is_active_slave(slave))
3323 bond_validate_na(bond, slave, saddr, daddr);
3324 else if (curr_active_slave &&
3325 time_after(slave_last_rx(bond, curr_active_slave),
3326 curr_active_slave->last_link_up))
3327 bond_validate_na(bond, slave, daddr, saddr);
3328 else if (curr_arp_slave &&
3329 bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
3330 bond_validate_na(bond, slave, saddr, daddr);
3331
3332out:
3333 return RX_HANDLER_ANOTHER;
3334}
3335#endif
3336
3337int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond,
3338 struct slave *slave)
3339{
3340#if IS_ENABLED(CONFIG_IPV6)
3341 bool is_ipv6 = skb->protocol == __cpu_to_be16(ETH_P_IPV6);
3342#endif
3343 bool is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
3344
3345 slave_dbg(bond->dev, slave->dev, "%s: skb->dev %s\n",
3346 __func__, skb->dev->name);
3347
3348 /* Use arp validate logic for both ARP and NS */
3349 if (!slave_do_arp_validate(bond, slave)) {
3350 if ((slave_do_arp_validate_only(bond) && is_arp) ||
3351#if IS_ENABLED(CONFIG_IPV6)
3352 (slave_do_arp_validate_only(bond) && is_ipv6) ||
3353#endif
3354 !slave_do_arp_validate_only(bond))
3355 slave->last_rx = jiffies;
3356 return RX_HANDLER_ANOTHER;
3357 } else if (is_arp) {
3358 return bond_arp_rcv(skb, bond, slave);
3359#if IS_ENABLED(CONFIG_IPV6)
3360 } else if (is_ipv6) {
3361 return bond_na_rcv(skb, bond, slave);
3362#endif
3363 } else {
3364 return RX_HANDLER_ANOTHER;
3365 }
3366}
3367
3368static void bond_send_validate(struct bonding *bond, struct slave *slave)
3369{
3370 bond_arp_send_all(bond, slave);
3371#if IS_ENABLED(CONFIG_IPV6)
3372 bond_ns_send_all(bond, slave);
3373#endif
3374}
3375
3376/* Verify whether we're inside the arp_interval timeslice; returns true if
3377 * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
3378 * arp_interval/2). The arp_interval/2 slack is needed for really fast networks.
3379 */
3380static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
3381 int mod)
3382{
3383 int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3384
3385 return time_in_range(jiffies,
3386 last_act - delta_in_ticks,
3387 last_act + mod * delta_in_ticks + delta_in_ticks/2);
3388}
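
/* Worked example: with arp_interval=1000 (ms) and mod=2, the accepted window
 * runs from 1000 ms before last_act to 2500 ms after it (2 * 1000 + 500).
 */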
3389
3390/* This function is called regularly to monitor each slave's link
3391 * ensuring that traffic is being sent and received when arp monitoring
3392 * is used in load-balancing mode. if the adapter has been dormant, then an
3393 * arp is transmitted to generate traffic. see activebackup_arp_monitor for
3394 * arp monitoring in active backup mode.
3395 */
3396static void bond_loadbalance_arp_mon(struct bonding *bond)
3397{
3398 struct slave *slave, *oldcurrent;
3399 struct list_head *iter;
3400 int do_failover = 0, slave_state_changed = 0;
3401
3402 if (!bond_has_slaves(bond))
3403 goto re_arm;
3404
3405 rcu_read_lock();
3406
3407 oldcurrent = rcu_dereference(bond->curr_active_slave);
3408 /* see if any of the previous devices are up now (i.e. they have
3409 * xmt and rcv traffic). the curr_active_slave does not come into
3410 * the picture unless it is null. also, slave->last_link_up is not
3411 * needed here because we send an arp on each slave and give a slave
3412 * as long as it needs to get the tx/rx within the delta.
3413 * TODO: what about up/down delay in arp mode? it wasn't here before
3414 * so it can wait
3415 */
3416 bond_for_each_slave_rcu(bond, slave, iter) {
3417 unsigned long last_tx = slave_last_tx(slave);
3418
3419 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3420
3421 if (slave->link != BOND_LINK_UP) {
3422 if (bond_time_in_interval(bond, last_tx, 1) &&
3423 bond_time_in_interval(bond, slave->last_rx, 1)) {
3424
3425 bond_propose_link_state(slave, BOND_LINK_UP);
3426 slave_state_changed = 1;
3427
3428 /* primary_slave has no meaning in round-robin
3429 * mode. the window of a slave being up and
3430 * curr_active_slave being null after enslaving
3431 * is closed.
3432 */
3433 if (!oldcurrent) {
3434 slave_info(bond->dev, slave->dev, "link status definitely up\n");
3435 do_failover = 1;
3436 } else {
3437 slave_info(bond->dev, slave->dev, "interface is now up\n");
3438 }
3439 }
3440 } else {
3441 /* slave->link == BOND_LINK_UP */
3442
3443 /* not all switches will respond to an arp request
3444 * when the source ip is 0, so don't take the link down
3445 * if we don't know our ip yet
3446 */
3447 if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3448 !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) {
3449
3450 bond_propose_link_state(slave, BOND_LINK_DOWN);
3451 slave_state_changed = 1;
3452
3453 if (slave->link_failure_count < UINT_MAX)
3454 slave->link_failure_count++;
3455
3456 slave_info(bond->dev, slave->dev, "interface is now down\n");
3457
3458 if (slave == oldcurrent)
3459 do_failover = 1;
3460 }
3461 }
3462
3463 /* note: if switch is in round-robin mode, all links
3464 * must tx arp to ensure all links rx an arp - otherwise
3465 * links may oscillate or not come up at all; if switch is
3466 * in something like xor mode, there is nothing we can
3467 * do - all replies will be rx'ed on same link causing slaves
3468 * to be unstable during low/no traffic periods
3469 */
3470 if (bond_slave_is_up(slave))
3471 bond_send_validate(bond, slave);
3472 }
3473
3474 rcu_read_unlock();
3475
3476 if (do_failover || slave_state_changed) {
3477 if (!rtnl_trylock())
3478 goto re_arm;
3479
3480 bond_for_each_slave(bond, slave, iter) {
3481 if (slave->link_new_state != BOND_LINK_NOCHANGE)
3482 slave->link = slave->link_new_state;
3483 }
3484
3485 if (slave_state_changed) {
3486 bond_slave_state_change(bond);
3487 if (BOND_MODE(bond) == BOND_MODE_XOR)
3488 bond_update_slave_arr(bond, NULL);
3489 }
3490 if (do_failover) {
3491 block_netpoll_tx();
3492 bond_select_active_slave(bond);
3493 unblock_netpoll_tx();
3494 }
3495 rtnl_unlock();
3496 }
3497
3498re_arm:
3499 if (bond->params.arp_interval)
3500 queue_delayed_work(bond->wq, &bond->arp_work,
3501 msecs_to_jiffies(bond->params.arp_interval));
3502}
3503
3504/* Called to inspect slaves for active-backup mode ARP monitor link state
3505 * changes. Sets proposed link state in slaves to specify what action
3506 * should take place for the slave. Returns 0 if no changes are found, >0
3507 * if changes to link states must be committed.
3508 *
3509 * Called with rcu_read_lock held.
3510 */
3511static int bond_ab_arp_inspect(struct bonding *bond)
3512{
3513 unsigned long last_tx, last_rx;
3514 struct list_head *iter;
3515 struct slave *slave;
3516 int commit = 0;
3517
3518 bond_for_each_slave_rcu(bond, slave, iter) {
3519 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
3520 last_rx = slave_last_rx(bond, slave);
3521
3522 if (slave->link != BOND_LINK_UP) {
3523 if (bond_time_in_interval(bond, last_rx, 1)) {
3524 bond_propose_link_state(slave, BOND_LINK_UP);
3525 commit++;
3526 } else if (slave->link == BOND_LINK_BACK) {
3527 bond_propose_link_state(slave, BOND_LINK_FAIL);
3528 commit++;
3529 }
3530 continue;
3531 }
3532
3533 /* Give slaves 2*delta after being enslaved or made
3534 * active. This avoids bouncing, as the last receive
3535 * times need a full ARP monitor cycle to be updated.
3536 */
3537 if (bond_time_in_interval(bond, slave->last_link_up, 2))
3538 continue;
3539
3540 /* Backup slave is down if:
3541 * - No current_arp_slave AND
3542 * - more than (missed_max+1)*delta since last receive AND
3543 * - the bond has an IP address
3544 *
3545 * Note: a non-null current_arp_slave indicates
3546 * the curr_active_slave went down and we are
3547 * searching for a new one; under this condition
3548 * we only take the curr_active_slave down - this
3549 * gives each slave a chance to tx/rx traffic
3550 * before being taken out
3551 */
3552 if (!bond_is_active_slave(slave) &&
3553 !rcu_access_pointer(bond->current_arp_slave) &&
3554 !bond_time_in_interval(bond, last_rx, bond->params.missed_max + 1)) {
3555 bond_propose_link_state(slave, BOND_LINK_DOWN);
3556 commit++;
3557 }
3558
3559 /* Active slave is down if:
3560 * - more than missed_max*delta since transmitting OR
3561 * - (more than missed_max*delta since receive AND
3562 * the bond has an IP address)
3563 */
3564 last_tx = slave_last_tx(slave);
3565 if (bond_is_active_slave(slave) &&
3566 (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) ||
3567 !bond_time_in_interval(bond, last_rx, bond->params.missed_max))) {
3568 bond_propose_link_state(slave, BOND_LINK_DOWN);
3569 commit++;
3570 }
3571 }
3572
3573 return commit;
3574}
3575
3576/* Called to commit link state changes noted by inspection step of
3577 * active-backup mode ARP monitor.
3578 *
3579 * Called with RTNL held.
3580 */
3581static void bond_ab_arp_commit(struct bonding *bond)
3582{
3583 bool do_failover = false;
3584 struct list_head *iter;
3585 unsigned long last_tx;
3586 struct slave *slave;
3587
3588 bond_for_each_slave(bond, slave, iter) {
3589 switch (slave->link_new_state) {
3590 case BOND_LINK_NOCHANGE:
3591 continue;
3592
3593 case BOND_LINK_UP:
3594 last_tx = slave_last_tx(slave);
3595 if (rtnl_dereference(bond->curr_active_slave) != slave ||
3596 (!rtnl_dereference(bond->curr_active_slave) &&
3597 bond_time_in_interval(bond, last_tx, 1))) {
3598 struct slave *current_arp_slave;
3599
3600 current_arp_slave = rtnl_dereference(bond->current_arp_slave);
3601 bond_set_slave_link_state(slave, BOND_LINK_UP,
3602 BOND_SLAVE_NOTIFY_NOW);
3603 if (current_arp_slave) {
3604 bond_set_slave_inactive_flags(
3605 current_arp_slave,
3606 BOND_SLAVE_NOTIFY_NOW);
3607 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3608 }
3609
3610 slave_info(bond->dev, slave->dev, "link status definitely up\n");
3611
3612 if (!rtnl_dereference(bond->curr_active_slave) ||
3613 slave == rtnl_dereference(bond->primary_slave) ||
3614 slave->prio > rtnl_dereference(bond->curr_active_slave)->prio)
3615 do_failover = true;
3616
3617 }
3618
3619 continue;
3620
3621 case BOND_LINK_DOWN:
3622 if (slave->link_failure_count < UINT_MAX)
3623 slave->link_failure_count++;
3624
3625 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3626 BOND_SLAVE_NOTIFY_NOW);
3627 bond_set_slave_inactive_flags(slave,
3628 BOND_SLAVE_NOTIFY_NOW);
3629
3630 slave_info(bond->dev, slave->dev, "link status definitely down, disabling slave\n");
3631
3632 if (slave == rtnl_dereference(bond->curr_active_slave)) {
3633 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3634 do_failover = true;
3635 }
3636
3637 continue;
3638
3639 case BOND_LINK_FAIL:
3640 bond_set_slave_link_state(slave, BOND_LINK_FAIL,
3641 BOND_SLAVE_NOTIFY_NOW);
3642 bond_set_slave_inactive_flags(slave,
3643 BOND_SLAVE_NOTIFY_NOW);
3644
3645 /* A slave has just been enslaved and has become
3646 * the current active slave.
3647 */
3648 if (rtnl_dereference(bond->curr_active_slave))
3649 RCU_INIT_POINTER(bond->current_arp_slave, NULL);
3650 continue;
3651
3652 default:
3653 slave_err(bond->dev, slave->dev,
3654 "impossible: link_new_state %d on slave\n",
3655 slave->link_new_state);
3656 continue;
3657 }
3658 }
3659
3660 if (do_failover) {
3661 block_netpoll_tx();
3662 bond_select_active_slave(bond);
3663 unblock_netpoll_tx();
3664 }
3665
3666 bond_set_carrier(bond);
3667}
3668
3669/* Send ARP probes for active-backup mode ARP monitor.
3670 *
3671 * Called with rcu_read_lock held.
3672 */
3673static bool bond_ab_arp_probe(struct bonding *bond)
3674{
3675 struct slave *slave, *before = NULL, *new_slave = NULL,
3676 *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
3677 *curr_active_slave = rcu_dereference(bond->curr_active_slave);
3678 struct list_head *iter;
3679 bool found = false;
3680 bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
3681
3682 if (curr_arp_slave && curr_active_slave)
3683 netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
3684 curr_arp_slave->dev->name,
3685 curr_active_slave->dev->name);
3686
3687 if (curr_active_slave) {
3688 bond_send_validate(bond, curr_active_slave);
3689 return should_notify_rtnl;
3690 }
3691
3692 /* if we don't have a curr_active_slave, search for the next available
3693 * backup slave from the current_arp_slave and make it the candidate
3694 * for becoming the curr_active_slave
3695 */
3696
3697 if (!curr_arp_slave) {
3698 curr_arp_slave = bond_first_slave_rcu(bond);
3699 if (!curr_arp_slave)
3700 return should_notify_rtnl;
3701 }
3702
3703 bond_for_each_slave_rcu(bond, slave, iter) {
3704 if (!found && !before && bond_slave_is_up(slave))
3705 before = slave;
3706
3707 if (found && !new_slave && bond_slave_is_up(slave))
3708 new_slave = slave;
3709 /* if the link state is up at this point, we
3710 * mark it down - this can happen if we have
3711 * simultaneous link failures and
3712 * reselect_active_interface doesn't make this
3713 * one the current slave, so it is still marked
3714 * up when it is actually down
3715 */
3716 if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
3717 bond_set_slave_link_state(slave, BOND_LINK_DOWN,
3718 BOND_SLAVE_NOTIFY_LATER);
3719 if (slave->link_failure_count < UINT_MAX)
3720 slave->link_failure_count++;
3721
3722 bond_set_slave_inactive_flags(slave,
3723 BOND_SLAVE_NOTIFY_LATER);
3724
3725 slave_info(bond->dev, slave->dev, "backup interface is now down\n");
3726 }
3727 if (slave == curr_arp_slave)
3728 found = true;
3729 }
3730
3731 if (!new_slave && before)
3732 new_slave = before;
3733
3734 if (!new_slave)
3735 goto check_state;
3736
3737 bond_set_slave_link_state(new_slave, BOND_LINK_BACK,
3738 BOND_SLAVE_NOTIFY_LATER);
3739 bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
3740 bond_send_validate(bond, new_slave);
3741 new_slave->last_link_up = jiffies;
3742 rcu_assign_pointer(bond->current_arp_slave, new_slave);
3743
3744check_state:
3745 bond_for_each_slave_rcu(bond, slave, iter) {
3746 if (slave->should_notify || slave->should_notify_link) {
3747 should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
3748 break;
3749 }
3750 }
3751 return should_notify_rtnl;
3752}
3753
3754static void bond_activebackup_arp_mon(struct bonding *bond)
3755{
3756 bool should_notify_peers = false;
3757 bool should_notify_rtnl = false;
3758 int delta_in_ticks;
3759
3760 delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
3761
3762 if (!bond_has_slaves(bond))
3763 goto re_arm;
3764
3765 rcu_read_lock();
3766
3767 should_notify_peers = bond_should_notify_peers(bond);
3768
3769 if (bond_ab_arp_inspect(bond)) {
3770 rcu_read_unlock();
3771
3772 /* Race avoidance with bond_close flush of workqueue */
3773 if (!rtnl_trylock()) {
3774 delta_in_ticks = 1;
3775 should_notify_peers = false;
3776 goto re_arm;
3777 }
3778
3779 bond_ab_arp_commit(bond);
3780
3781 rtnl_unlock();
3782 rcu_read_lock();
3783 }
3784
3785 should_notify_rtnl = bond_ab_arp_probe(bond);
3786 rcu_read_unlock();
3787
3788re_arm:
3789 if (bond->params.arp_interval)
3790 queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
3791
3792 if (should_notify_peers || should_notify_rtnl) {
3793 if (!rtnl_trylock())
3794 return;
3795
3796 if (should_notify_peers) {
3797 bond->send_peer_notif--;
3798 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
3799 bond->dev);
3800 }
3801 if (should_notify_rtnl) {
3802 bond_slave_state_notify(bond);
3803 bond_slave_link_notify(bond);
3804 }
3805
3806 rtnl_unlock();
3807 }
3808}
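
/* Editor's illustration (not driver code): the rtnl_trylock()/re-arm
 * pattern used by the monitor above, in isolation. If RTNL is contended
 * (e.g. bond_close() is flushing the workqueue), the work backs off and
 * re-queues itself one tick later instead of blocking. The predicate and
 * commit helpers here are hypothetical placeholders.
 */
#if 0
static void example_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    arp_work.work);
	int delay = msecs_to_jiffies(bond->params.arp_interval);

	if (example_changes_pending(bond)) {	/* hypothetical */
		if (!rtnl_trylock()) {
			delay = 1;	/* retry on the next tick */
			goto re_arm;
		}
		example_commit(bond);		/* hypothetical */
		rtnl_unlock();
	}
re_arm:
	queue_delayed_work(bond->wq, &bond->arp_work, delay);
}
#endif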
3809
3810static void bond_arp_monitor(struct work_struct *work)
3811{
3812 struct bonding *bond = container_of(work, struct bonding,
3813 arp_work.work);
3814
3815 if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
3816 bond_activebackup_arp_mon(bond);
3817 else
3818 bond_loadbalance_arp_mon(bond);
3819}
3820
3821/*-------------------------- netdev event handling --------------------------*/
3822
3823/* Change device name */
3824static int bond_event_changename(struct bonding *bond)
3825{
3826 bond_remove_proc_entry(bond);
3827 bond_create_proc_entry(bond);
3828
3829 bond_debug_reregister(bond);
3830
3831 return NOTIFY_DONE;
3832}
3833
3834static int bond_master_netdev_event(unsigned long event,
3835 struct net_device *bond_dev)
3836{
3837 struct bonding *event_bond = netdev_priv(bond_dev);
3838
3839 netdev_dbg(bond_dev, "%s called\n", __func__);
3840
3841 switch (event) {
3842 case NETDEV_CHANGENAME:
3843 return bond_event_changename(event_bond);
3844 case NETDEV_UNREGISTER:
3845 bond_remove_proc_entry(event_bond);
3846#ifdef CONFIG_XFRM_OFFLOAD
3847 xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
3848#endif /* CONFIG_XFRM_OFFLOAD */
3849 break;
3850 case NETDEV_REGISTER:
3851 bond_create_proc_entry(event_bond);
3852 break;
3853 default:
3854 break;
3855 }
3856
3857 return NOTIFY_DONE;
3858}
3859
3860static int bond_slave_netdev_event(unsigned long event,
3861 struct net_device *slave_dev)
3862{
3863 struct slave *slave = bond_slave_get_rtnl(slave_dev), *primary;
3864 struct bonding *bond;
3865 struct net_device *bond_dev;
3866
3867 /* A netdev event can be generated while enslaving a device
3868 * before netdev_rx_handler_register is called, in which case
3869 * slave will be NULL
3870 */
3871 if (!slave) {
3872 netdev_dbg(slave_dev, "%s called on NULL slave\n", __func__);
3873 return NOTIFY_DONE;
3874 }
3875
3876 bond_dev = slave->bond->dev;
3877 bond = slave->bond;
3878 primary = rtnl_dereference(bond->primary_slave);
3879
3880 slave_dbg(bond_dev, slave_dev, "%s called\n", __func__);
3881
3882 switch (event) {
3883 case NETDEV_UNREGISTER:
3884 if (bond_dev->type != ARPHRD_ETHER)
3885 bond_release_and_destroy(bond_dev, slave_dev);
3886 else
3887 __bond_release_one(bond_dev, slave_dev, false, true);
3888 break;
3889 case NETDEV_UP:
3890 case NETDEV_CHANGE:
3891 /* For 802.3ad mode only:
3892 * Getting invalid Speed/Duplex values here will put the slave
3893 * in a weird state. Mark it as link-fail if the link was
3894 * previously up or link-down if it hasn't yet come up, and
3895 * let link-monitoring (miimon) set it right when correct
3896 * speeds/duplex are available.
3897 */
3898 if (bond_update_speed_duplex(slave) &&
3899 BOND_MODE(bond) == BOND_MODE_8023AD) {
3900 if (slave->last_link_up)
3901 slave->link = BOND_LINK_FAIL;
3902 else
3903 slave->link = BOND_LINK_DOWN;
3904 }
3905
3906 if (BOND_MODE(bond) == BOND_MODE_8023AD)
3907 bond_3ad_adapter_speed_duplex_changed(slave);
3908 fallthrough;
3909 case NETDEV_DOWN:
3910 /* Refresh slave-array if applicable!
3911 * If the setup does not use miimon or arpmon (mode-specific!),
3912 * then these events will not cause the slave-array to be
3913 * refreshed. This will cause xmit to use a slave that is not
3914 * usable. Avoid this situation by refreshing the array on these
3915 * events. If these (miimon/arpmon) parameters are configured
3916 * then the array gets refreshed twice and that should be fine!
3917 */
3918 if (bond_mode_can_use_xmit_hash(bond))
3919 bond_update_slave_arr(bond, NULL);
3920 break;
3921 case NETDEV_CHANGEMTU:
3922 /* TODO: Should slaves be allowed to
3923 * independently alter their MTU? For
3924 * an active-backup bond, slaves need
3925 * not be the same type of device, so
3926 * MTUs may vary. For other modes,
3927 * slaves arguably should have the
3928 * same MTUs. To do this, we'd need to
3929 * take over the slave's change_mtu
3930 * function for the duration of their
3931 * servitude.
3932 */
3933 break;
3934 case NETDEV_CHANGENAME:
3935 /* we don't care if we don't have primary set */
3936 if (!bond_uses_primary(bond) ||
3937 !bond->params.primary[0])
3938 break;
3939
3940 if (slave == primary) {
3941 /* slave's name changed - it's no longer primary */
3942 RCU_INIT_POINTER(bond->primary_slave, NULL);
3943 } else if (!strcmp(slave_dev->name, bond->params.primary)) {
3944 /* we have a new primary slave */
3945 rcu_assign_pointer(bond->primary_slave, slave);
3946 } else { /* we didn't change primary - exit */
3947 break;
3948 }
3949
3950 netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
3951 primary ? slave_dev->name : "none");
3952
3953 block_netpoll_tx();
3954 bond_select_active_slave(bond);
3955 unblock_netpoll_tx();
3956 break;
3957 case NETDEV_FEAT_CHANGE:
3958 if (!bond->notifier_ctx) {
3959 bond->notifier_ctx = true;
3960 bond_compute_features(bond);
3961 bond->notifier_ctx = false;
3962 }
3963 break;
3964 case NETDEV_RESEND_IGMP:
3965 /* Propagate to master device */
3966 call_netdevice_notifiers(event, slave->bond->dev);
3967 break;
3968 case NETDEV_XDP_FEAT_CHANGE:
3969 bond_xdp_set_features(bond_dev);
3970 break;
3971 default:
3972 break;
3973 }
3974
3975 return NOTIFY_DONE;
3976}
3977
3978/* bond_netdev_event: handle netdev notifier chain events.
3979 *
3980 * This function receives events for the netdev chain. The caller (an
3981 * ioctl handler calling blocking_notifier_call_chain) holds the necessary
3982 * locks for us to safely manipulate the slave devices (RTNL lock,
3983 * dev_probe_lock).
3984 */
3985static int bond_netdev_event(struct notifier_block *this,
3986 unsigned long event, void *ptr)
3987{
3988 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
3989
3990 netdev_dbg(event_dev, "%s received %s\n",
3991 __func__, netdev_cmd_to_name(event));
3992
3993 if (!(event_dev->priv_flags & IFF_BONDING))
3994 return NOTIFY_DONE;
3995
3996 if (event_dev->flags & IFF_MASTER) {
3997 int ret;
3998
3999 ret = bond_master_netdev_event(event, event_dev);
4000 if (ret != NOTIFY_DONE)
4001 return ret;
4002 }
4003
4004 if (event_dev->flags & IFF_SLAVE)
4005 return bond_slave_netdev_event(event, event_dev);
4006
4007 return NOTIFY_DONE;
4008}
4009
4010static struct notifier_block bond_netdev_notifier = {
4011 .notifier_call = bond_netdev_event,
4012};
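
/* Editor's note: a notifier block like the one above only takes effect
 * once registered; this driver does so at module init (bond_init(),
 * later in this file), essentially:
 */
#if 0
static int __init example_register_notifier(void)
{
	return register_netdevice_notifier(&bond_netdev_notifier);
}
#endif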
4013
4014/*---------------------------- Hashing Policies -----------------------------*/
4015
4016/* Helper to access data in a packet, with or without a backing skb.
4017 * If skb is given, the data is linearized if necessary via pskb_may_pull.
4018 */
4019static inline const void *bond_pull_data(struct sk_buff *skb,
4020 const void *data, int hlen, int n)
4021{
4022 if (likely(n <= hlen))
4023 return data;
4024 else if (skb && likely(pskb_may_pull(skb, n)))
4025 return skb->head;
4026
4027 return NULL;
4028}
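
/* Editor's illustration (not driver code): typical bond_pull_data()
 * usage. pskb_may_pull() can reallocate the skb head, so a caller must
 * re-derive its header pointer from the returned base rather than reuse
 * a pointer computed before the pull. example_ethertype() is
 * hypothetical.
 */
#if 0
static __be16 example_ethertype(struct sk_buff *skb, const void *data,
				int hlen)
{
	const struct ethhdr *eth;

	data = bond_pull_data(skb, data, hlen, sizeof(struct ethhdr));
	if (!data)
		return 0;

	eth = data;	/* recomputed from the (possibly new) base */
	return eth->h_proto;
}
#endif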
4029
4030/* L2 hash helper */
4031static inline u32 bond_eth_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
4032{
4033 struct ethhdr *ep;
4034
4035 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4036 if (!data)
4037 return 0;
4038
4039 ep = (struct ethhdr *)(data + mhoff);
4040 return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
4041}
4042
4043static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
4044 int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
4045{
4046 const struct ipv6hdr *iph6;
4047 const struct iphdr *iph;
4048
4049 if (l2_proto == htons(ETH_P_IP)) {
4050 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph));
4051 if (!data)
4052 return false;
4053
4054 iph = (const struct iphdr *)(data + *nhoff);
4055 iph_to_flow_copy_v4addrs(fk, iph);
4056 *nhoff += iph->ihl << 2;
4057 if (!ip_is_fragment(iph))
4058 *ip_proto = iph->protocol;
4059 } else if (l2_proto == htons(ETH_P_IPV6)) {
4060 data = bond_pull_data(skb, data, hlen, *nhoff + sizeof(*iph6));
4061 if (!data)
4062 return false;
4063
4064 iph6 = (const struct ipv6hdr *)(data + *nhoff);
4065 iph_to_flow_copy_v6addrs(fk, iph6);
4066 *nhoff += sizeof(*iph6);
4067 *ip_proto = iph6->nexthdr;
4068 } else {
4069 return false;
4070 }
4071
4072 if (l34 && *ip_proto >= 0)
4073 fk->ports.ports = __skb_flow_get_ports(skb, *nhoff, *ip_proto, data, hlen);
4074
4075 return true;
4076}
4077
4078static u32 bond_vlan_srcmac_hash(struct sk_buff *skb, const void *data, int mhoff, int hlen)
4079{
4080 u32 srcmac_vendor = 0, srcmac_dev = 0;
4081 struct ethhdr *mac_hdr;
4082 u16 vlan = 0;
4083 int i;
4084
4085 data = bond_pull_data(skb, data, hlen, mhoff + sizeof(struct ethhdr));
4086 if (!data)
4087 return 0;
4088 mac_hdr = (struct ethhdr *)(data + mhoff);
4089
4090 for (i = 0; i < 3; i++)
4091 srcmac_vendor = (srcmac_vendor << 8) | mac_hdr->h_source[i];
4092
4093 for (i = 3; i < ETH_ALEN; i++)
4094 srcmac_dev = (srcmac_dev << 8) | mac_hdr->h_source[i];
4095
4096 if (skb && skb_vlan_tag_present(skb))
4097 vlan = skb_vlan_tag_get(skb);
4098
4099 return vlan ^ srcmac_vendor ^ srcmac_dev;
4100}
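
/* Editor's worked example for the vlan+srcmac policy above: for source
 * MAC 00:11:22:33:44:55 on VLAN 10, srcmac_vendor = 0x001122 and
 * srcmac_dev = 0x334455, so the hash is 10 ^ 0x001122 ^ 0x334455 =
 * 0x33557d. example_vlan_srcmac() is a hypothetical reduction of the
 * loops above.
 */
#if 0
static u32 example_vlan_srcmac(void)
{
	const u8 src[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u32 vendor = src[0] << 16 | src[1] << 8 | src[2];
	u32 dev = src[3] << 16 | src[4] << 8 | src[5];

	return 10 ^ vendor ^ dev;	/* 0x33557d */
}
#endif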
4101
4102/* Extract the appropriate headers based on bond's xmit policy */
4103static bool bond_flow_dissect(struct bonding *bond, struct sk_buff *skb, const void *data,
4104 __be16 l2_proto, int nhoff, int hlen, struct flow_keys *fk)
4105{
4106 bool l34 = bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34;
4107 int ip_proto = -1;
4108
4109 switch (bond->params.xmit_policy) {
4110 case BOND_XMIT_POLICY_ENCAP23:
4111 case BOND_XMIT_POLICY_ENCAP34:
4112 memset(fk, 0, sizeof(*fk));
4113 return __skb_flow_dissect(NULL, skb, &flow_keys_bonding,
4114 fk, data, l2_proto, nhoff, hlen, 0);
4115 default:
4116 break;
4117 }
4118
4119 fk->ports.ports = 0;
4120 memset(&fk->icmp, 0, sizeof(fk->icmp));
4121 if (!bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34))
4122 return false;
4123
4124 /* ICMP error packets contain at least 8 bytes of the header
4125 * of the packet which generated the error. Use this information
4126 * to correlate ICMP error packets within the same flow which
4127 * generated the error.
4128 */
4129 if (ip_proto == IPPROTO_ICMP || ip_proto == IPPROTO_ICMPV6) {
4130 skb_flow_get_icmp_tci(skb, &fk->icmp, data, nhoff, hlen);
4131 if (ip_proto == IPPROTO_ICMP) {
4132 if (!icmp_is_err(fk->icmp.type))
4133 return true;
4134
4135 nhoff += sizeof(struct icmphdr);
4136 } else if (ip_proto == IPPROTO_ICMPV6) {
4137 if (!icmpv6_is_err(fk->icmp.type))
4138 return true;
4139
4140 nhoff += sizeof(struct icmp6hdr);
4141 }
4142 return bond_flow_ip(skb, fk, data, hlen, l2_proto, &nhoff, &ip_proto, l34);
4143 }
4144
4145 return true;
4146}
4147
4148static u32 bond_ip_hash(u32 hash, struct flow_keys *flow, int xmit_policy)
4149{
4150 hash ^= (__force u32)flow_get_u32_dst(flow) ^
4151 (__force u32)flow_get_u32_src(flow);
4152 hash ^= (hash >> 16);
4153 hash ^= (hash >> 8);
4154
4155 /* discard lowest hash bit to deal with the common even ports pattern */
4156 if (xmit_policy == BOND_XMIT_POLICY_LAYER34 ||
4157 xmit_policy == BOND_XMIT_POLICY_ENCAP34)
4158 return hash >> 1;
4159
4160 return hash;
4161}
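
/* Editor's note, with a concrete case: ephemeral source ports are very
 * commonly even, so without the final ">> 1" above the low bit of an
 * L3+L4 hash would be biased and the xmit path's "hash % count" would
 * favour the same slaves. example_fold() is a hypothetical restatement
 * of the folding steps for a LAYER34 policy.
 */
#if 0
static u32 example_fold(u32 hash)
{
	hash ^= hash >> 16;
	hash ^= hash >> 8;
	return hash >> 1;	/* LAYER34/ENCAP34 discard the lowest bit */
}
#endif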
4162
4163/* Generate hash based on xmit policy. If @skb is given it is used to linearize
4164 * the data as required, but this function can be used without it if the data is
4165 * known to be linear (e.g. with xdp_buff).
4166 */
4167static u32 __bond_xmit_hash(struct bonding *bond, struct sk_buff *skb, const void *data,
4168 __be16 l2_proto, int mhoff, int nhoff, int hlen)
4169{
4170 struct flow_keys flow;
4171 u32 hash;
4172
4173 if (bond->params.xmit_policy == BOND_XMIT_POLICY_VLAN_SRCMAC)
4174 return bond_vlan_srcmac_hash(skb, data, mhoff, hlen);
4175
4176 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
4177 !bond_flow_dissect(bond, skb, data, l2_proto, nhoff, hlen, &flow))
4178 return bond_eth_hash(skb, data, mhoff, hlen);
4179
4180 if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23 ||
4181 bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP23) {
4182 hash = bond_eth_hash(skb, data, mhoff, hlen);
4183 } else {
4184 if (flow.icmp.id)
4185 memcpy(&hash, &flow.icmp, sizeof(hash));
4186 else
4187 memcpy(&hash, &flow.ports.ports, sizeof(hash));
4188 }
4189
4190 return bond_ip_hash(hash, &flow, bond->params.xmit_policy);
4191}
4192
4193/**
4194 * bond_xmit_hash - generate a hash value based on the xmit policy
4195 * @bond: bonding device
4196 * @skb: buffer to use for headers
4197 *
4198 * This function will extract the necessary headers from the skb buffer and use
4199 * them to generate a hash based on the xmit_policy set in the bonding device
4200 */
4201u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
4202{
4203 if (bond->params.xmit_policy == BOND_XMIT_POLICY_ENCAP34 &&
4204 skb->l4_hash)
4205 return skb->hash;
4206
4207 return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
4208 0, skb_network_offset(skb),
4209 skb_headlen(skb));
4210}
4211
4212/**
4213 * bond_xmit_hash_xdp - generate a hash value based on the xmit policy
4214 * @bond: bonding device
4215 * @xdp: buffer to use for headers
4216 *
4217 * The XDP variant of bond_xmit_hash.
4218 */
4219static u32 bond_xmit_hash_xdp(struct bonding *bond, struct xdp_buff *xdp)
4220{
4221 struct ethhdr *eth;
4222
4223 if (xdp->data + sizeof(struct ethhdr) > xdp->data_end)
4224 return 0;
4225
4226 eth = (struct ethhdr *)xdp->data;
4227
4228 return __bond_xmit_hash(bond, NULL, xdp->data, eth->h_proto, 0,
4229 sizeof(struct ethhdr), xdp->data_end - xdp->data);
4230}
4231
4232/*-------------------------- Device entry points ----------------------------*/
4233
4234void bond_work_init_all(struct bonding *bond)
4235{
4236 INIT_DELAYED_WORK(&bond->mcast_work,
4237 bond_resend_igmp_join_requests_delayed);
4238 INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
4239 INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor);
4240 INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
4241 INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
4242 INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
4243}
4244
4245static void bond_work_cancel_all(struct bonding *bond)
4246{
4247 cancel_delayed_work_sync(&bond->mii_work);
4248 cancel_delayed_work_sync(&bond->arp_work);
4249 cancel_delayed_work_sync(&bond->alb_work);
4250 cancel_delayed_work_sync(&bond->ad_work);
4251 cancel_delayed_work_sync(&bond->mcast_work);
4252 cancel_delayed_work_sync(&bond->slave_arr_work);
4253}
4254
4255static int bond_open(struct net_device *bond_dev)
4256{
4257 struct bonding *bond = netdev_priv(bond_dev);
4258 struct list_head *iter;
4259 struct slave *slave;
4260
4261 if (BOND_MODE(bond) == BOND_MODE_ROUNDROBIN && !bond->rr_tx_counter) {
4262 bond->rr_tx_counter = alloc_percpu(u32);
4263 if (!bond->rr_tx_counter)
4264 return -ENOMEM;
4265 }
4266
4267 /* reset slave->backup and slave->inactive */
4268 if (bond_has_slaves(bond)) {
4269 bond_for_each_slave(bond, slave, iter) {
4270 if (bond_uses_primary(bond) &&
4271 slave != rcu_access_pointer(bond->curr_active_slave)) {
4272 bond_set_slave_inactive_flags(slave,
4273 BOND_SLAVE_NOTIFY_NOW);
4274 } else if (BOND_MODE(bond) != BOND_MODE_8023AD) {
4275 bond_set_slave_active_flags(slave,
4276 BOND_SLAVE_NOTIFY_NOW);
4277 }
4278 }
4279 }
4280
4281 if (bond_is_lb(bond)) {
4282 /* bond_alb_initialize must be called before the timer
4283 * is started.
4284 */
4285 if (bond_alb_initialize(bond, (BOND_MODE(bond) == BOND_MODE_ALB)))
4286 return -ENOMEM;
4287 if (bond->params.tlb_dynamic_lb || BOND_MODE(bond) == BOND_MODE_ALB)
4288 queue_delayed_work(bond->wq, &bond->alb_work, 0);
4289 }
4290
4291 if (bond->params.miimon) /* link check interval, in milliseconds. */
4292 queue_delayed_work(bond->wq, &bond->mii_work, 0);
4293
4294 if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
4295 queue_delayed_work(bond->wq, &bond->arp_work, 0);
4296 bond->recv_probe = bond_rcv_validate;
4297 }
4298
4299 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
4300 queue_delayed_work(bond->wq, &bond->ad_work, 0);
4301 /* register to receive LACPDUs */
4302 bond->recv_probe = bond_3ad_lacpdu_recv;
4303 bond_3ad_initiate_agg_selection(bond, 1);
4304
4305 bond_for_each_slave(bond, slave, iter)
4306 dev_mc_add(slave->dev, lacpdu_mcast_addr);
4307 }
4308
4309 if (bond_mode_can_use_xmit_hash(bond))
4310 bond_update_slave_arr(bond, NULL);
4311
4312 return 0;
4313}
4314
4315static int bond_close(struct net_device *bond_dev)
4316{
4317 struct bonding *bond = netdev_priv(bond_dev);
4318 struct slave *slave;
4319
4320 bond_work_cancel_all(bond);
4321 bond->send_peer_notif = 0;
4322 if (bond_is_lb(bond))
4323 bond_alb_deinitialize(bond);
4324 bond->recv_probe = NULL;
4325
4326 if (bond_uses_primary(bond)) {
4327 rcu_read_lock();
4328 slave = rcu_dereference(bond->curr_active_slave);
4329 if (slave)
4330 bond_hw_addr_flush(bond_dev, slave->dev);
4331 rcu_read_unlock();
4332 } else {
4333 struct list_head *iter;
4334
4335 bond_for_each_slave(bond, slave, iter)
4336 bond_hw_addr_flush(bond_dev, slave->dev);
4337 }
4338
4339 return 0;
4340}
4341
4342/* fold stats, assuming all rtnl_link_stats64 fields are u64, but
4343 * allowing for drivers that can provide 32bit values only.
4344 */
4345static void bond_fold_stats(struct rtnl_link_stats64 *_res,
4346 const struct rtnl_link_stats64 *_new,
4347 const struct rtnl_link_stats64 *_old)
4348{
4349 const u64 *new = (const u64 *)_new;
4350 const u64 *old = (const u64 *)_old;
4351 u64 *res = (u64 *)_res;
4352 int i;
4353
4354 for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) {
4355 u64 nv = new[i];
4356 u64 ov = old[i];
4357 s64 delta = nv - ov;
4358
4359 /* detects if this particular field is 32bit only */
4360 if (((nv | ov) >> 32) == 0)
4361 delta = (s64)(s32)((u32)nv - (u32)ov);
4362
4363 /* filter anomalies, some drivers reset their stats
4364 * at down/up events.
4365 */
4366 if (delta > 0)
4367 res[i] += delta;
4368 }
4369}
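
/* Editor's worked example for the 32bit handling above: a slave driver
 * exposes a 32bit-only counter that wraps from 0xfffffff0 to 0x00000005.
 * Plain 64bit subtraction would produce a negative delta that gets
 * filtered out; the 32bit path recovers the true increment:
 * (s32)((u32)0x00000005 - (u32)0xfffffff0) = 21.
 */
#if 0
static s64 example_delta32(u32 nv, u32 ov)
{
	return (s64)(s32)(nv - ov);	/* 0x5 - 0xfffffff0 -> 21 */
}
#endif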
4370
4371#ifdef CONFIG_LOCKDEP
4372static int bond_get_lowest_level_rcu(struct net_device *dev)
4373{
4374 struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
4375 struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
4376 int cur = 0, max = 0;
4377
4378 now = dev;
4379 iter = &dev->adj_list.lower;
4380
4381 while (1) {
4382 next = NULL;
4383 while (1) {
4384 ldev = netdev_next_lower_dev_rcu(now, &iter);
4385 if (!ldev)
4386 break;
4387
4388 next = ldev;
4389 niter = &ldev->adj_list.lower;
4390 dev_stack[cur] = now;
4391 iter_stack[cur++] = iter;
4392 if (max <= cur)
4393 max = cur;
4394 break;
4395 }
4396
4397 if (!next) {
4398 if (!cur)
4399 return max;
4400 next = dev_stack[--cur];
4401 niter = iter_stack[cur];
4402 }
4403
4404 now = next;
4405 iter = niter;
4406 }
4407
4408 return max;
4409}
4410#endif
4411
4412static void bond_get_stats(struct net_device *bond_dev,
4413 struct rtnl_link_stats64 *stats)
4414{
4415 struct bonding *bond = netdev_priv(bond_dev);
4416 struct rtnl_link_stats64 temp;
4417 struct list_head *iter;
4418 struct slave *slave;
4419 int nest_level = 0;
4420
4422 rcu_read_lock();
4423#ifdef CONFIG_LOCKDEP
4424 nest_level = bond_get_lowest_level_rcu(bond_dev);
4425#endif
4426
4427 spin_lock_nested(&bond->stats_lock, nest_level);
4428 memcpy(stats, &bond->bond_stats, sizeof(*stats));
4429
4430 bond_for_each_slave_rcu(bond, slave, iter) {
4431 const struct rtnl_link_stats64 *new =
4432 dev_get_stats(slave->dev, &temp);
4433
4434 bond_fold_stats(stats, new, &slave->slave_stats);
4435
4436 /* save off the slave stats for the next run */
4437 memcpy(&slave->slave_stats, new, sizeof(*new));
4438 }
4439
4440 memcpy(&bond->bond_stats, stats, sizeof(*stats));
4441 spin_unlock(&bond->stats_lock);
4442 rcu_read_unlock();
4443}
4444
4445static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4446{
4447 struct bonding *bond = netdev_priv(bond_dev);
4448 struct mii_ioctl_data *mii = NULL;
4449 const struct net_device_ops *ops;
4450 struct net_device *real_dev;
4451 struct hwtstamp_config cfg;
4452 struct ifreq ifrr;
4453 int res = 0;
4454
4455 netdev_dbg(bond_dev, "bond_eth_ioctl: cmd=%d\n", cmd);
4456
4457 switch (cmd) {
4458 case SIOCGMIIPHY:
4459 mii = if_mii(ifr);
4460 if (!mii)
4461 return -EINVAL;
4462
4463 mii->phy_id = 0;
4464 fallthrough;
4465 case SIOCGMIIREG:
4466 /* We do this again just in case we were called by SIOCGMIIREG
4467 * instead of SIOCGMIIPHY.
4468 */
4469 mii = if_mii(ifr);
4470 if (!mii)
4471 return -EINVAL;
4472
4473 if (mii->reg_num == 1) {
4474 mii->val_out = 0;
4475 if (netif_carrier_ok(bond->dev))
4476 mii->val_out = BMSR_LSTATUS;
4477 }
4478
4479 break;
4480 case SIOCSHWTSTAMP:
4481 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4482 return -EFAULT;
4483
4484 if (!(cfg.flags & HWTSTAMP_FLAG_BONDED_PHC_INDEX))
4485 return -EOPNOTSUPP;
4486
4487 fallthrough;
4488 case SIOCGHWTSTAMP:
4489 real_dev = bond_option_active_slave_get_rcu(bond);
4490 if (!real_dev)
4491 return -EOPNOTSUPP;
4492
4493 strscpy_pad(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
4494 ifrr.ifr_ifru = ifr->ifr_ifru;
4495
4496 ops = real_dev->netdev_ops;
4497 if (netif_device_present(real_dev) && ops->ndo_eth_ioctl) {
4498 res = ops->ndo_eth_ioctl(real_dev, &ifrr, cmd);
4499 if (res)
4500 return res;
4501
4502 ifr->ifr_ifru = ifrr.ifr_ifru;
4503 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
4504 return -EFAULT;
4505
4506 /* Set the BOND_PHC_INDEX flag to notify user space */
4507 cfg.flags |= HWTSTAMP_FLAG_BONDED_PHC_INDEX;
4508
4509 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ?
4510 -EFAULT : 0;
4511 }
4512 fallthrough;
4513 default:
4514 res = -EOPNOTSUPP;
4515 }
4516
4517 return res;
4518}
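
/* Editor's illustration (user space, not driver code): requesting hw
 * timestamping on a bond via the SIOCSHWTSTAMP path above. Setting
 * HWTSTAMP_FLAG_BONDED_PHC_INDEX opts in to the "config follows the
 * active slave" behaviour. "bond0" is an assumed device name and error
 * handling is elided.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int example_enable_hwtstamp(int fd)
{
	struct hwtstamp_config cfg = {
		.flags = HWTSTAMP_FLAG_BONDED_PHC_INDEX,
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}
#endif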
4519
4520static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
4521{
4522 struct bonding *bond = netdev_priv(bond_dev);
4523 struct net_device *slave_dev = NULL;
4524 struct ifbond k_binfo;
4525 struct ifbond __user *u_binfo = NULL;
4526 struct ifslave k_sinfo;
4527 struct ifslave __user *u_sinfo = NULL;
4528 struct bond_opt_value newval;
4529 struct net *net;
4530 int res = 0;
4531
4532 netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
4533
4534 switch (cmd) {
4535 case SIOCBONDINFOQUERY:
4536 u_binfo = (struct ifbond __user *)ifr->ifr_data;
4537
4538 if (copy_from_user(&k_binfo, u_binfo, sizeof(ifbond)))
4539 return -EFAULT;
4540
4541 bond_info_query(bond_dev, &k_binfo);
4542 if (copy_to_user(u_binfo, &k_binfo, sizeof(ifbond)))
4543 return -EFAULT;
4544
4545 return 0;
4546 case SIOCBONDSLAVEINFOQUERY:
4547 u_sinfo = (struct ifslave __user *)ifr->ifr_data;
4548
4549 if (copy_from_user(&k_sinfo, u_sinfo, sizeof(ifslave)))
4550 return -EFAULT;
4551
4552 res = bond_slave_info_query(bond_dev, &k_sinfo);
4553 if (res == 0 &&
4554 copy_to_user(u_sinfo, &k_sinfo, sizeof(ifslave)))
4555 return -EFAULT;
4556
4557 return res;
4558 default:
4559 break;
4560 }
4561
4562 net = dev_net(bond_dev);
4563
4564 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4565 return -EPERM;
4566
4567 slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
4568
4569 slave_dbg(bond_dev, slave_dev, "slave_dev=%p:\n", slave_dev);
4570
4571 if (!slave_dev)
4572 return -ENODEV;
4573
4574 switch (cmd) {
4575 case SIOCBONDENSLAVE:
4576 res = bond_enslave(bond_dev, slave_dev, NULL);
4577 break;
4578 case SIOCBONDRELEASE:
4579 res = bond_release(bond_dev, slave_dev);
4580 break;
4581 case SIOCBONDSETHWADDR:
4582 res = bond_set_dev_addr(bond_dev, slave_dev);
4583 break;
4584 case SIOCBONDCHANGEACTIVE:
4585 bond_opt_initstr(&newval, slave_dev->name);
4586 res = __bond_opt_set_notify(bond, BOND_OPT_ACTIVE_SLAVE,
4587 &newval);
4588 break;
4589 default:
4590 res = -EOPNOTSUPP;
4591 }
4592
4593 return res;
4594}
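
/* Editor's illustration (user space, not driver code): the legacy
 * SIOCBONDINFOQUERY path above, seen from the caller's side. Assumes an
 * AF_INET socket fd and an existing "bond0"; error handling elided.
 */
#if 0
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_bonding.h>
#include <linux/sockios.h>

static void example_bond_info(int fd)
{
	struct ifbond binfo;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	memset(&binfo, 0, sizeof(binfo));
	strncpy(ifr.ifr_name, "bond0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&binfo;

	if (ioctl(fd, SIOCBONDINFOQUERY, &ifr) == 0)
		printf("mode %d, %d slaves\n",
		       binfo.bond_mode, binfo.num_slaves);
}
#endif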
4595
4596static int bond_siocdevprivate(struct net_device *bond_dev, struct ifreq *ifr,
4597 void __user *data, int cmd)
4598{
4599 struct ifreq ifrdata = { .ifr_data = data };
4600
4601 switch (cmd) {
4602 case BOND_INFO_QUERY_OLD:
4603 return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDINFOQUERY);
4604 case BOND_SLAVE_INFO_QUERY_OLD:
4605 return bond_do_ioctl(bond_dev, &ifrdata, SIOCBONDSLAVEINFOQUERY);
4606 case BOND_ENSLAVE_OLD:
4607 return bond_do_ioctl(bond_dev, ifr, SIOCBONDENSLAVE);
4608 case BOND_RELEASE_OLD:
4609 return bond_do_ioctl(bond_dev, ifr, SIOCBONDRELEASE);
4610 case BOND_SETHWADDR_OLD:
4611 return bond_do_ioctl(bond_dev, ifr, SIOCBONDSETHWADDR);
4612 case BOND_CHANGE_ACTIVE_OLD:
4613 return bond_do_ioctl(bond_dev, ifr, SIOCBONDCHANGEACTIVE);
4614 }
4615
4616 return -EOPNOTSUPP;
4617}
4618
4619static void bond_change_rx_flags(struct net_device *bond_dev, int change)
4620{
4621 struct bonding *bond = netdev_priv(bond_dev);
4622
4623 if (change & IFF_PROMISC)
4624 bond_set_promiscuity(bond,
4625 bond_dev->flags & IFF_PROMISC ? 1 : -1);
4626
4627 if (change & IFF_ALLMULTI)
4628 bond_set_allmulti(bond,
4629 bond_dev->flags & IFF_ALLMULTI ? 1 : -1);
4630}
4631
4632static void bond_set_rx_mode(struct net_device *bond_dev)
4633{
4634 struct bonding *bond = netdev_priv(bond_dev);
4635 struct list_head *iter;
4636 struct slave *slave;
4637
4638 rcu_read_lock();
4639 if (bond_uses_primary(bond)) {
4640 slave = rcu_dereference(bond->curr_active_slave);
4641 if (slave) {
4642 dev_uc_sync(slave->dev, bond_dev);
4643 dev_mc_sync(slave->dev, bond_dev);
4644 }
4645 } else {
4646 bond_for_each_slave_rcu(bond, slave, iter) {
4647 dev_uc_sync_multiple(slave->dev, bond_dev);
4648 dev_mc_sync_multiple(slave->dev, bond_dev);
4649 }
4650 }
4651 rcu_read_unlock();
4652}
4653
4654static int bond_neigh_init(struct neighbour *n)
4655{
4656 struct bonding *bond = netdev_priv(n->dev);
4657 const struct net_device_ops *slave_ops;
4658 struct neigh_parms parms;
4659 struct slave *slave;
4660 int ret = 0;
4661
4662 rcu_read_lock();
4663 slave = bond_first_slave_rcu(bond);
4664 if (!slave)
4665 goto out;
4666 slave_ops = slave->dev->netdev_ops;
4667 if (!slave_ops->ndo_neigh_setup)
4668 goto out;
4669
4670 /* TODO: find another way [1] to implement this.
4671 * Passing a zeroed structure is fragile,
4672 * but at least we do not pass garbage.
4673 *
4674 * [1] One way would be that ndo_neigh_setup() never touch
4675 * struct neigh_parms, but propagate the new neigh_setup()
4676 * back to ___neigh_create() / neigh_parms_alloc()
4677 */
4678 memset(&parms, 0, sizeof(parms));
4679 ret = slave_ops->ndo_neigh_setup(slave->dev, &parms);
4680
4681 if (ret)
4682 goto out;
4683
4684 if (parms.neigh_setup)
4685 ret = parms.neigh_setup(n);
4686out:
4687 rcu_read_unlock();
4688 return ret;
4689}
4690
4691 /* The bonding ndo_neigh_setup is called at init time before any
4692 * slave exists. So we must declare a proxy setup function which will
4693 * be used at run time to resolve the actual slave neigh param setup.
4694 *
4695 * It's also called by master devices (such as vlans) to setup their
4696 * underlying devices. In that case - do nothing, we're already set up from
4697 * our init.
4698 */
4699static int bond_neigh_setup(struct net_device *dev,
4700 struct neigh_parms *parms)
4701{
4702 /* modify only our neigh_parms */
4703 if (parms->dev == dev)
4704 parms->neigh_setup = bond_neigh_init;
4705
4706 return 0;
4707}
4708
4709/* Change the MTU of all of a master's slaves to match the master */
4710static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
4711{
4712 struct bonding *bond = netdev_priv(bond_dev);
4713 struct slave *slave, *rollback_slave;
4714 struct list_head *iter;
4715 int res = 0;
4716
4717 netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
4718
4719 bond_for_each_slave(bond, slave, iter) {
4720 slave_dbg(bond_dev, slave->dev, "s %p c_m %p\n",
4721 slave, slave->dev->netdev_ops->ndo_change_mtu);
4722
4723 res = dev_set_mtu(slave->dev, new_mtu);
4724
4725 if (res) {
4726 /* If we failed to set the slave's mtu to the new value
4727 * we must abort the operation even in ACTIVE_BACKUP
4728 * mode, because if we allow the backup slaves to have
4729 * different mtu values than the active slave we'll
4730 * need to change their mtu when doing a failover. That
4731 * means changing their mtu from timer context, which
4732 * is probably not a good idea.
4733 */
4734 slave_dbg(bond_dev, slave->dev, "err %d setting mtu to %d\n",
4735 res, new_mtu);
4736 goto unwind;
4737 }
4738 }
4739
4740 bond_dev->mtu = new_mtu;
4741
4742 return 0;
4743
4744unwind:
4745 /* unwind from head to the slave that failed */
4746 bond_for_each_slave(bond, rollback_slave, iter) {
4747 int tmp_res;
4748
4749 if (rollback_slave == slave)
4750 break;
4751
4752 tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
4753 if (tmp_res)
4754 slave_dbg(bond_dev, rollback_slave->dev, "unwind err %d\n",
4755 tmp_res);
4756 }
4757
4758 return res;
4759}
4760
4761/* Change HW address
4762 *
4763 * Note that many devices must be down to change the HW address, and
4764 * downing the master releases all slaves. We can make bonds full of
4765 * bonding devices to test this, however.
4766 */
4767static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
4768{
4769 struct bonding *bond = netdev_priv(bond_dev);
4770 struct slave *slave, *rollback_slave;
4771 struct sockaddr_storage *ss = addr, tmp_ss;
4772 struct list_head *iter;
4773 int res = 0;
4774
4775 if (BOND_MODE(bond) == BOND_MODE_ALB)
4776 return bond_alb_set_mac_address(bond_dev, addr);
4777
4779 netdev_dbg(bond_dev, "%s: bond=%p\n", __func__, bond);
4780
4781 /* If fail_over_mac is enabled, do nothing and return success.
4782 * Returning an error causes ifenslave to fail.
4783 */
4784 if (bond->params.fail_over_mac &&
4785 BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
4786 return 0;
4787
4788 if (!is_valid_ether_addr(ss->__data))
4789 return -EADDRNOTAVAIL;
4790
4791 bond_for_each_slave(bond, slave, iter) {
4792 slave_dbg(bond_dev, slave->dev, "%s: slave=%p\n",
4793 __func__, slave);
4794 res = dev_set_mac_address(slave->dev, addr, NULL);
4795 if (res) {
4796 /* TODO: consider downing the slave
4797 * and retry ?
4798 * User should expect communications
4799 * breakage anyway until ARP finish
4800 * updating, so...
4801 */
4802 slave_dbg(bond_dev, slave->dev, "%s: err %d\n",
4803 __func__, res);
4804 goto unwind;
4805 }
4806 }
4807
4808 /* success */
4809 dev_addr_set(bond_dev, ss->__data);
4810 return 0;
4811
4812unwind:
4813 memcpy(tmp_ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
4814 tmp_ss.ss_family = bond_dev->type;
4815
4816 /* unwind from head to the slave that failed */
4817 bond_for_each_slave(bond, rollback_slave, iter) {
4818 int tmp_res;
4819
4820 if (rollback_slave == slave)
4821 break;
4822
4823 tmp_res = dev_set_mac_address(rollback_slave->dev,
4824 (struct sockaddr *)&tmp_ss, NULL);
4825 if (tmp_res) {
4826 slave_dbg(bond_dev, rollback_slave->dev, "%s: unwind err %d\n",
4827 __func__, tmp_res);
4828 }
4829 }
4830
4831 return res;
4832}
4833
4834/**
4835 * bond_get_slave_by_id - get xmit slave with slave_id
4836 * @bond: bonding device that is transmitting
4837 * @slave_id: slave id up to slave_cnt-1 through which to transmit
4838 *
4839 * This function tries to get the slave with slave_id but, in case
4840 * it fails, it finds the first available slave for transmission.
4841 */
4842static struct slave *bond_get_slave_by_id(struct bonding *bond,
4843 int slave_id)
4844{
4845 struct list_head *iter;
4846 struct slave *slave;
4847 int i = slave_id;
4848
4849 /* Here we start from the slave with slave_id */
4850 bond_for_each_slave_rcu(bond, slave, iter) {
4851 if (--i < 0) {
4852 if (bond_slave_can_tx(slave))
4853 return slave;
4854 }
4855 }
4856
4857 /* Here we start from the first slave up to slave_id */
4858 i = slave_id;
4859 bond_for_each_slave_rcu(bond, slave, iter) {
4860 if (--i < 0)
4861 break;
4862 if (bond_slave_can_tx(slave))
4863 return slave;
4864 }
4865 /* no slave that can tx has been found */
4866 return NULL;
4867}
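
/* Editor's worked example: with four slaves and slave_id = 2, the first
 * loop above tries slaves 2 and 3; if neither can tx, the second loop
 * wraps around to try slaves 0 and 1. example_pick() is a hypothetical
 * array-based analog of the two loops.
 */
#if 0
static int example_pick(const bool can_tx[], int n, int start)
{
	int i;

	for (i = 0; i < n; i++)
		if (can_tx[(start + i) % n])
			return (start + i) % n;

	return -1;	/* no slave that can tx */
}
#endif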
4868
4869/**
4870 * bond_rr_gen_slave_id - generate slave id based on packets_per_slave
4871 * @bond: bonding device to use
4872 *
4873 * Based on the value of the bonding device's packets_per_slave parameter
4874 * this function generates a slave id, which is usually used as the next
4875 * slave to transmit through.
4876 */
4877static u32 bond_rr_gen_slave_id(struct bonding *bond)
4878{
4879 u32 slave_id;
4880 struct reciprocal_value reciprocal_packets_per_slave;
4881 int packets_per_slave = bond->params.packets_per_slave;
4882
4883 switch (packets_per_slave) {
4884 case 0:
4885 slave_id = get_random_u32();
4886 break;
4887 case 1:
4888 slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4889 break;
4890 default:
4891 reciprocal_packets_per_slave =
4892 bond->params.reciprocal_packets_per_slave;
4893 slave_id = this_cpu_inc_return(*bond->rr_tx_counter);
4894 slave_id = reciprocal_divide(slave_id,
4895 reciprocal_packets_per_slave);
4896 break;
4897 }
4898
4899 return slave_id;
4900}
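
/* Editor's worked example for packets_per_slave above: with
 * packets_per_slave = 3, counter values 3,4,5 all divide to id 1 and
 * 6,7,8 to id 2, so each slave gets a burst of three consecutive packets
 * before the callers' "% slave_cnt" moves on. reciprocal_divide() is a
 * multiply-based u32 division by the precomputed reciprocal value.
 */
#if 0
static u32 example_rr_id(u32 counter, u32 packets_per_slave)
{
	return counter / packets_per_slave;	/* what reciprocal_divide() computes */
}
#endif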
4901
4902static struct slave *bond_xmit_roundrobin_slave_get(struct bonding *bond,
4903 struct sk_buff *skb)
4904{
4905 struct slave *slave;
4906 int slave_cnt;
4907 u32 slave_id;
4908
4909 /* Start with the curr_active_slave that joined the bond as the
4910 * default for sending IGMP traffic. For failover purposes one
4911 * needs to maintain some consistency for the interface that will
4912 * send the join/membership reports. The curr_active_slave found
4913 * will send all of this type of traffic.
4914 */
4915 if (skb->protocol == htons(ETH_P_IP)) {
4916 int noff = skb_network_offset(skb);
4917 struct iphdr *iph;
4918
4919 if (unlikely(!pskb_may_pull(skb, noff + sizeof(*iph))))
4920 goto non_igmp;
4921
4922 iph = ip_hdr(skb);
4923 if (iph->protocol == IPPROTO_IGMP) {
4924 slave = rcu_dereference(bond->curr_active_slave);
4925 if (slave)
4926 return slave;
4927 return bond_get_slave_by_id(bond, 0);
4928 }
4929 }
4930
4931non_igmp:
4932 slave_cnt = READ_ONCE(bond->slave_cnt);
4933 if (likely(slave_cnt)) {
4934 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4935 return bond_get_slave_by_id(bond, slave_id);
4936 }
4937 return NULL;
4938}
4939
4940static struct slave *bond_xdp_xmit_roundrobin_slave_get(struct bonding *bond,
4941 struct xdp_buff *xdp)
4942{
4943 struct slave *slave;
4944 int slave_cnt;
4945 u32 slave_id;
4946 const struct ethhdr *eth;
4947 void *data = xdp->data;
4948
4949 if (data + sizeof(struct ethhdr) > xdp->data_end)
4950 goto non_igmp;
4951
4952 eth = (struct ethhdr *)data;
4953 data += sizeof(struct ethhdr);
4954
4955 /* See comment on IGMP in bond_xmit_roundrobin_slave_get() */
4956 if (eth->h_proto == htons(ETH_P_IP)) {
4957 const struct iphdr *iph;
4958
4959 if (data + sizeof(struct iphdr) > xdp->data_end)
4960 goto non_igmp;
4961
4962 iph = (struct iphdr *)data;
4963
4964 if (iph->protocol == IPPROTO_IGMP) {
4965 slave = rcu_dereference(bond->curr_active_slave);
4966 if (slave)
4967 return slave;
4968 return bond_get_slave_by_id(bond, 0);
4969 }
4970 }
4971
4972non_igmp:
4973 slave_cnt = READ_ONCE(bond->slave_cnt);
4974 if (likely(slave_cnt)) {
4975 slave_id = bond_rr_gen_slave_id(bond) % slave_cnt;
4976 return bond_get_slave_by_id(bond, slave_id);
4977 }
4978 return NULL;
4979}
4980
4981static netdev_tx_t bond_xmit_roundrobin(struct sk_buff *skb,
4982 struct net_device *bond_dev)
4983{
4984 struct bonding *bond = netdev_priv(bond_dev);
4985 struct slave *slave;
4986
4987 slave = bond_xmit_roundrobin_slave_get(bond, skb);
4988 if (likely(slave))
4989 return bond_dev_queue_xmit(bond, skb, slave->dev);
4990
4991 return bond_tx_drop(bond_dev, skb);
4992}
4993
4994static struct slave *bond_xmit_activebackup_slave_get(struct bonding *bond)
4995{
4996 return rcu_dereference(bond->curr_active_slave);
4997}
4998
4999/* In active-backup mode, we know that bond->curr_active_slave is always valid if
5000 * the bond has a usable interface.
5001 */
5002static netdev_tx_t bond_xmit_activebackup(struct sk_buff *skb,
5003 struct net_device *bond_dev)
5004{
5005 struct bonding *bond = netdev_priv(bond_dev);
5006 struct slave *slave;
5007
5008 slave = bond_xmit_activebackup_slave_get(bond);
5009 if (slave)
5010 return bond_dev_queue_xmit(bond, skb, slave->dev);
5011
5012 return bond_tx_drop(bond_dev, skb);
5013}
5014
5015/* Use this to update slave_array when (a) it's not appropriate to update
5016 * slave_array right away (note that bond_update_slave_arr() may sleep)
5017 * and/or (b) RTNL is not held.
5018 */
5019void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay)
5020{
5021 queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
5022}
5023
5024/* Slave array work handler. Holds only RTNL */
5025static void bond_slave_arr_handler(struct work_struct *work)
5026{
5027 struct bonding *bond = container_of(work, struct bonding,
5028 slave_arr_work.work);
5029 int ret;
5030
5031 if (!rtnl_trylock())
5032 goto err;
5033
5034 ret = bond_update_slave_arr(bond, NULL);
5035 rtnl_unlock();
5036 if (ret) {
5037 pr_warn_ratelimited("Failed to update slave array from WT\n");
5038 goto err;
5039 }
5040 return;
5041
5042err:
5043 bond_slave_arr_work_rearm(bond, 1);
5044}
5045
5046static void bond_skip_slave(struct bond_up_slave *slaves,
5047 struct slave *skipslave)
5048{
5049 int idx;
5050
5051 /* Rare situation where caller has asked to skip a specific
5052 * slave but allocation failed (most likely!). This is
5053 * only possible when the call is initiated from
5054 * __bond_release_one(). In this situation, overwrite the
5055 * skipslave entry in the array with the last entry from the
5056 * array to avoid a situation where the xmit path may choose
5057 * this to-be-skipped slave to send a packet out.
5058 */
5059 for (idx = 0; slaves && idx < slaves->count; idx++) {
5060 if (skipslave == slaves->arr[idx]) {
5061 slaves->arr[idx] =
5062 slaves->arr[slaves->count - 1];
5063 slaves->count--;
5064 break;
5065 }
5066 }
5067}
5068
5069static void bond_set_slave_arr(struct bonding *bond,
5070 struct bond_up_slave *usable_slaves,
5071 struct bond_up_slave *all_slaves)
5072{
5073 struct bond_up_slave *usable, *all;
5074
5075 usable = rtnl_dereference(bond->usable_slaves);
5076 rcu_assign_pointer(bond->usable_slaves, usable_slaves);
5077 kfree_rcu(usable, rcu);
5078
5079 all = rtnl_dereference(bond->all_slaves);
5080 rcu_assign_pointer(bond->all_slaves, all_slaves);
5081 kfree_rcu(all, rcu);
5082}
5083
5084static void bond_reset_slave_arr(struct bonding *bond)
5085{
5086 struct bond_up_slave *usable, *all;
5087
5088 usable = rtnl_dereference(bond->usable_slaves);
5089 if (usable) {
5090 RCU_INIT_POINTER(bond->usable_slaves, NULL);
5091 kfree_rcu(usable, rcu);
5092 }
5093
5094 all = rtnl_dereference(bond->all_slaves);
5095 if (all) {
5096 RCU_INIT_POINTER(bond->all_slaves, NULL);
5097 kfree_rcu(all, rcu);
5098 }
5099}
5100
5101/* Build the usable slaves array in control path for modes that use xmit-hash
5102 * to determine the slave interface -
5103 * (a) BOND_MODE_8023AD
5104 * (b) BOND_MODE_XOR
5105 * (c) (BOND_MODE_TLB || BOND_MODE_ALB) && tlb_dynamic_lb == 0
5106 *
5107 * The caller is expected to hold RTNL only and NO other lock!
5108 */
5109int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave)
5110{
5111 struct bond_up_slave *usable_slaves = NULL, *all_slaves = NULL;
5112 struct slave *slave;
5113 struct list_head *iter;
5114 int agg_id = 0;
5115 int ret = 0;
5116
5117 might_sleep();
5118
5119 usable_slaves = kzalloc(struct_size(usable_slaves, arr,
5120 bond->slave_cnt), GFP_KERNEL);
5121 all_slaves = kzalloc(struct_size(all_slaves, arr,
5122 bond->slave_cnt), GFP_KERNEL);
5123 if (!usable_slaves || !all_slaves) {
5124 ret = -ENOMEM;
5125 goto out;
5126 }
5127 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5128 struct ad_info ad_info;
5129
5130 spin_lock_bh(&bond->mode_lock);
5131 if (bond_3ad_get_active_agg_info(bond, &ad_info)) {
5132 spin_unlock_bh(&bond->mode_lock);
5133 pr_debug("bond_3ad_get_active_agg_info failed\n");
5134 /* No active aggregator means it's not safe to use
5135 * the previous array.
5136 */
5137 bond_reset_slave_arr(bond);
5138 goto out;
5139 }
5140 spin_unlock_bh(&bond->mode_lock);
5141 agg_id = ad_info.aggregator_id;
5142 }
5143 bond_for_each_slave(bond, slave, iter) {
5144 if (skipslave == slave)
5145 continue;
5146
5147 all_slaves->arr[all_slaves->count++] = slave;
5148 if (BOND_MODE(bond) == BOND_MODE_8023AD) {
5149 struct aggregator *agg;
5150
5151 agg = SLAVE_AD_INFO(slave)->port.aggregator;
5152 if (!agg || agg->aggregator_identifier != agg_id)
5153 continue;
5154 }
5155 if (!bond_slave_can_tx(slave))
5156 continue;
5157
5158 slave_dbg(bond->dev, slave->dev, "Adding slave to tx hash array[%d]\n",
5159 usable_slaves->count);
5160
5161 usable_slaves->arr[usable_slaves->count++] = slave;
5162 }
5163
5164 bond_set_slave_arr(bond, usable_slaves, all_slaves);
5165 return ret;
5166out:
5167 if (ret != 0 && skipslave) {
5168 bond_skip_slave(rtnl_dereference(bond->all_slaves),
5169 skipslave);
5170 bond_skip_slave(rtnl_dereference(bond->usable_slaves),
5171 skipslave);
5172 }
5173 kfree_rcu(all_slaves, rcu);
5174 kfree_rcu(usable_slaves, rcu);
5175
5176 return ret;
5177}
5178
5179static struct slave *bond_xmit_3ad_xor_slave_get(struct bonding *bond,
5180 struct sk_buff *skb,
5181 struct bond_up_slave *slaves)
5182{
5183 struct slave *slave;
5184 unsigned int count;
5185 u32 hash;
5186
5187 hash = bond_xmit_hash(bond, skb);
5188 count = slaves ? READ_ONCE(slaves->count) : 0;
5189 if (unlikely(!count))
5190 return NULL;
5191
5192 slave = slaves->arr[hash % count];
5193 return slave;
5194}
5195
5196static struct slave *bond_xdp_xmit_3ad_xor_slave_get(struct bonding *bond,
5197 struct xdp_buff *xdp)
5198{
5199 struct bond_up_slave *slaves;
5200 unsigned int count;
5201 u32 hash;
5202
5203 hash = bond_xmit_hash_xdp(bond, xdp);
5204 slaves = rcu_dereference(bond->usable_slaves);
5205 count = slaves ? READ_ONCE(slaves->count) : 0;
5206 if (unlikely(!count))
5207 return NULL;
5208
5209 return slaves->arr[hash % count];
5210}
5211
5212/* Use this Xmit function for 3AD as well as XOR modes. The current
5213 * usable slave array is formed in the control path. The xmit function
5214 * just calculates the hash and sends the packet out.
5215 */
5216static netdev_tx_t bond_3ad_xor_xmit(struct sk_buff *skb,
5217 struct net_device *dev)
5218{
5219 struct bonding *bond = netdev_priv(dev);
5220 struct bond_up_slave *slaves;
5221 struct slave *slave;
5222
5223 slaves = rcu_dereference(bond->usable_slaves);
5224 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5225 if (likely(slave))
5226 return bond_dev_queue_xmit(bond, skb, slave->dev);
5227
5228 return bond_tx_drop(dev, skb);
5229}
5230
5231/* in broadcast mode, we send everything to all usable interfaces. */
5232static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
5233 struct net_device *bond_dev)
5234{
5235 struct bonding *bond = netdev_priv(bond_dev);
5236 struct slave *slave = NULL;
5237 struct list_head *iter;
5238 bool xmit_suc = false;
5239 bool skb_used = false;
5240
5241 bond_for_each_slave_rcu(bond, slave, iter) {
5242 struct sk_buff *skb2;
5243
5244 if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
5245 continue;
5246
5247 if (bond_is_last_slave(bond, slave)) {
5248 skb2 = skb;
5249 skb_used = true;
5250 } else {
5251 skb2 = skb_clone(skb, GFP_ATOMIC);
5252 if (!skb2) {
5253 net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
5254 bond_dev->name, __func__);
5255 continue;
5256 }
5257 }
5258
5259 if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
5260 xmit_suc = true;
5261 }
5262
5263 if (!skb_used)
5264 dev_kfree_skb_any(skb);
5265
5266 if (xmit_suc)
5267 return NETDEV_TX_OK;
5268
5269 dev_core_stats_tx_dropped_inc(bond_dev);
5270 return NET_XMIT_DROP;
5271}
5272
5273/*------------------------- Device initialization ---------------------------*/
5274
5275/* Lookup the slave that corresponds to a qid */
5276static inline int bond_slave_override(struct bonding *bond,
5277 struct sk_buff *skb)
5278{
5279 struct slave *slave = NULL;
5280 struct list_head *iter;
5281
5282 if (!skb_rx_queue_recorded(skb))
5283 return 1;
5284
5285 /* Find out if any slaves have the same mapping as this skb. */
5286 bond_for_each_slave_rcu(bond, slave, iter) {
5287 if (slave->queue_id == skb_get_queue_mapping(skb)) {
5288 if (bond_slave_is_up(slave) &&
5289 slave->link == BOND_LINK_UP) {
5290 bond_dev_queue_xmit(bond, skb, slave->dev);
5291 return 0;
5292 }
5293 /* If the slave isn't UP, use default transmit policy. */
5294 break;
5295 }
5296 }
5297
5298 return 1;
5299}
5300
5302static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
5303 struct net_device *sb_dev)
5304{
5305 /* This helper function exists to help dev_pick_tx get the correct
5306 * destination queue. Using a helper function skips a call to
5307 * skb_tx_hash and will put the skbs in the queue we expect on their
5308 * way down to the bonding driver.
5309 */
5310 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
5311
5312 /* Save the original txq to restore before passing to the driver */
5313 qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb_get_queue_mapping(skb);
5314
5315 if (unlikely(txq >= dev->real_num_tx_queues)) {
5316 do {
5317 txq -= dev->real_num_tx_queues;
5318 } while (txq >= dev->real_num_tx_queues);
5319 }
5320 return txq;
5321}
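
/* Editor's note: the subtraction loop above is a cheap modulo for the
 * rare out-of-range case, e.g. txq = 7 with 3 real tx queues reduces
 * 7 -> 4 -> 1. example_reduce() is a hypothetical restatement.
 */
#if 0
static u16 example_reduce(u16 txq, u16 nq)
{
	while (txq >= nq)
		txq -= nq;

	return txq;	/* txq % nq, for nq > 0 */
}
#endif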
5322
5323static struct net_device *bond_xmit_get_slave(struct net_device *master_dev,
5324 struct sk_buff *skb,
5325 bool all_slaves)
5326{
5327 struct bonding *bond = netdev_priv(master_dev);
5328 struct bond_up_slave *slaves;
5329 struct slave *slave = NULL;
5330
5331 switch (BOND_MODE(bond)) {
5332 case BOND_MODE_ROUNDROBIN:
5333 slave = bond_xmit_roundrobin_slave_get(bond, skb);
5334 break;
5335 case BOND_MODE_ACTIVEBACKUP:
5336 slave = bond_xmit_activebackup_slave_get(bond);
5337 break;
5338 case BOND_MODE_8023AD:
5339 case BOND_MODE_XOR:
5340 if (all_slaves)
5341 slaves = rcu_dereference(bond->all_slaves);
5342 else
5343 slaves = rcu_dereference(bond->usable_slaves);
5344 slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
5345 break;
5346 case BOND_MODE_BROADCAST:
5347 break;
5348 case BOND_MODE_ALB:
5349 slave = bond_xmit_alb_slave_get(bond, skb);
5350 break;
5351 case BOND_MODE_TLB:
5352 slave = bond_xmit_tlb_slave_get(bond, skb);
5353 break;
5354 default:
5355 /* Should never happen, mode already checked */
5356 WARN_ONCE(true, "Unknown bonding mode");
5357 break;
5358 }
5359
5360 if (slave)
5361 return slave->dev;
5362 return NULL;
5363}
5364
5365static void bond_sk_to_flow(struct sock *sk, struct flow_keys *flow)
5366{
5367 switch (sk->sk_family) {
5368#if IS_ENABLED(CONFIG_IPV6)
5369 case AF_INET6:
5370 if (ipv6_only_sock(sk) ||
5371 ipv6_addr_type(&sk->sk_v6_daddr) != IPV6_ADDR_MAPPED) {
5372 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
5373 flow->addrs.v6addrs.src = inet6_sk(sk)->saddr;
5374 flow->addrs.v6addrs.dst = sk->sk_v6_daddr;
5375 break;
5376 }
5377 fallthrough;
5378#endif
5379 default: /* AF_INET */
5380 flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
5381 flow->addrs.v4addrs.src = inet_sk(sk)->inet_rcv_saddr;
5382 flow->addrs.v4addrs.dst = inet_sk(sk)->inet_daddr;
5383 break;
5384 }
5385
5386 flow->ports.src = inet_sk(sk)->inet_sport;
5387 flow->ports.dst = inet_sk(sk)->inet_dport;
5388}
5389
5390/**
5391 * bond_sk_hash_l34 - generate a hash value based on the socket's L3 and L4 fields
5392 * @sk: socket to use for headers
5393 *
5394 * This function will extract the necessary field from the socket and use
5395 * them to generate a hash based on the LAYER34 xmit_policy.
5396 * Assumes that sk is a TCP or UDP socket.
5397 */
5398static u32 bond_sk_hash_l34(struct sock *sk)
5399{
5400 struct flow_keys flow;
5401 u32 hash;
5402
5403 bond_sk_to_flow(sk, &flow);
5404
5405 /* L4 */
5406 memcpy(&hash, &flow.ports.ports, sizeof(hash));
5407 /* L3 */
5408 return bond_ip_hash(hash, &flow, BOND_XMIT_POLICY_LAYER34);
5409}
5410
5411static struct net_device *__bond_sk_get_lower_dev(struct bonding *bond,
5412 struct sock *sk)
5413{
5414 struct bond_up_slave *slaves;
5415 struct slave *slave;
5416 unsigned int count;
5417 u32 hash;
5418
5419 slaves = rcu_dereference(bond->usable_slaves);
5420 count = slaves ? READ_ONCE(slaves->count) : 0;
5421 if (unlikely(!count))
5422 return NULL;
5423
5424 hash = bond_sk_hash_l34(sk);
5425 slave = slaves->arr[hash % count];
5426
5427 return slave->dev;
5428}
5429
5430static struct net_device *bond_sk_get_lower_dev(struct net_device *dev,
5431 struct sock *sk)
5432{
5433 struct bonding *bond = netdev_priv(dev);
5434 struct net_device *lower = NULL;
5435
5436 rcu_read_lock();
5437 if (bond_sk_check(bond))
5438 lower = __bond_sk_get_lower_dev(bond, sk);
5439 rcu_read_unlock();
5440
5441 return lower;
5442}
5443
5444#if IS_ENABLED(CONFIG_TLS_DEVICE)
5445static netdev_tx_t bond_tls_device_xmit(struct bonding *bond, struct sk_buff *skb,
5446 struct net_device *dev)
5447{
5448 struct net_device *tls_netdev = rcu_dereference(tls_get_ctx(skb->sk)->netdev);
5449
5450 /* tls_netdev might become NULL, even if tls_is_skb_tx_device_offloaded
5451 * was true, if tls_device_down is running in parallel, but it's OK,
5452 * because bond_get_slave_by_dev has a NULL check.
5453 */
5454 if (likely(bond_get_slave_by_dev(bond, tls_netdev)))
5455 return bond_dev_queue_xmit(bond, skb, tls_netdev);
5456 return bond_tx_drop(dev, skb);
5457}
5458#endif
5459
5460static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5461{
5462 struct bonding *bond = netdev_priv(dev);
5463
5464 if (bond_should_override_tx_queue(bond) &&
5465 !bond_slave_override(bond, skb))
5466 return NETDEV_TX_OK;
5467
5468#if IS_ENABLED(CONFIG_TLS_DEVICE)
5469 if (tls_is_skb_tx_device_offloaded(skb))
5470 return bond_tls_device_xmit(bond, skb, dev);
5471#endif
5472
5473 switch (BOND_MODE(bond)) {
5474 case BOND_MODE_ROUNDROBIN:
5475 return bond_xmit_roundrobin(skb, dev);
5476 case BOND_MODE_ACTIVEBACKUP:
5477 return bond_xmit_activebackup(skb, dev);
5478 case BOND_MODE_8023AD:
5479 case BOND_MODE_XOR:
5480 return bond_3ad_xor_xmit(skb, dev);
5481 case BOND_MODE_BROADCAST:
5482 return bond_xmit_broadcast(skb, dev);
5483 case BOND_MODE_ALB:
5484 return bond_alb_xmit(skb, dev);
5485 case BOND_MODE_TLB:
5486 return bond_tlb_xmit(skb, dev);
5487 default:
5488 /* Should never happen, mode already checked */
5489 netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
5490 WARN_ON_ONCE(1);
5491 return bond_tx_drop(dev, skb);
5492 }
5493}
5494
5495static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
5496{
5497 struct bonding *bond = netdev_priv(dev);
5498 netdev_tx_t ret = NETDEV_TX_OK;
5499
5500 /* If we risk deadlock from transmitting this in the
5501 * netpoll path, tell netpoll to queue the frame for later tx
5502 */
5503 if (unlikely(is_netpoll_tx_blocked(dev)))
5504 return NETDEV_TX_BUSY;
5505
5506 rcu_read_lock();
5507 if (bond_has_slaves(bond))
5508 ret = __bond_start_xmit(skb, dev);
5509 else
5510 ret = bond_tx_drop(dev, skb);
5511 rcu_read_unlock();
5512
5513 return ret;
5514}
5515
5516static struct net_device *
5517bond_xdp_get_xmit_slave(struct net_device *bond_dev, struct xdp_buff *xdp)
5518{
5519 struct bonding *bond = netdev_priv(bond_dev);
5520 struct slave *slave;
5521
5522 /* Caller needs to hold rcu_read_lock() */
5523
5524 switch (BOND_MODE(bond)) {
5525 case BOND_MODE_ROUNDROBIN:
5526 slave = bond_xdp_xmit_roundrobin_slave_get(bond, xdp);
5527 break;
5528
5529 case BOND_MODE_ACTIVEBACKUP:
5530 slave = bond_xmit_activebackup_slave_get(bond);
5531 break;
5532
5533 case BOND_MODE_8023AD:
5534 case BOND_MODE_XOR:
5535 slave = bond_xdp_xmit_3ad_xor_slave_get(bond, xdp);
5536 break;
5537
5538 default:
5539 /* Should never happen. Mode guarded by bond_xdp_check() */
5540 netdev_err(bond_dev, "Unknown bonding mode %d for xdp xmit\n", BOND_MODE(bond));
5541 WARN_ON_ONCE(1);
5542 return NULL;
5543 }
5544
5545 if (slave)
5546 return slave->dev;
5547
5548 return NULL;
5549}
5550
5551static int bond_xdp_xmit(struct net_device *bond_dev,
5552 int n, struct xdp_frame **frames, u32 flags)
5553{
5554 int nxmit, err = -ENXIO;
5555
5556 rcu_read_lock();
5557
5558 for (nxmit = 0; nxmit < n; nxmit++) {
5559 struct xdp_frame *frame = frames[nxmit];
5560 struct xdp_frame *frames1[] = {frame};
5561 struct net_device *slave_dev;
5562 struct xdp_buff xdp;
5563
5564 xdp_convert_frame_to_buff(frame, &xdp);
5565
5566 slave_dev = bond_xdp_get_xmit_slave(bond_dev, &xdp);
5567 if (!slave_dev) {
5568 err = -ENXIO;
5569 break;
5570 }
5571
5572 err = slave_dev->netdev_ops->ndo_xdp_xmit(slave_dev, 1, frames1, flags);
5573 if (err < 1)
5574 break;
5575 }
5576
5577 rcu_read_unlock();
5578
5579 /* If an error happened on the first frame then we can pass the error up, otherwise
5580 * report the number of frames that were xmitted.
5581 */
5582 if (err < 0)
5583 return (nxmit == 0 ? err : nxmit);
5584
5585 return nxmit;
5586}
5587
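/* Install (or remove, when @prog is NULL) an XDP program on the bond
 * and propagate it to every slave.  All slaves must support XDP and
 * must not already carry a program of their own; on any failure the
 * change is unwound from the slaves that already accepted it.
 */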
static int bond_xdp_set(struct net_device *dev, struct bpf_prog *prog,
			struct netlink_ext_ack *extack)
{
	struct bonding *bond = netdev_priv(dev);
	struct list_head *iter;
	struct slave *slave, *rollback_slave;
	struct bpf_prog *old_prog;
	struct netdev_bpf xdp = {
		.command = XDP_SETUP_PROG,
		.flags = 0,
		.prog = prog,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (!bond_xdp_check(bond))
		return -EOPNOTSUPP;

	old_prog = bond->xdp_prog;
	bond->xdp_prog = prog;

	bond_for_each_slave(bond, slave, iter) {
		struct net_device *slave_dev = slave->dev;

		if (!slave_dev->netdev_ops->ndo_bpf ||
		    !slave_dev->netdev_ops->ndo_xdp_xmit) {
			SLAVE_NL_ERR(dev, slave_dev, extack,
				     "Slave device does not support XDP");
			err = -EOPNOTSUPP;
			goto err;
		}

		if (dev_xdp_prog_count(slave_dev) > 0) {
			SLAVE_NL_ERR(dev, slave_dev, extack,
				     "Slave has XDP program loaded, please unload before enslaving");
			err = -EOPNOTSUPP;
			goto err;
		}

		err = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
		if (err < 0) {
			/* ndo_bpf() sets extack error message */
			slave_err(dev, slave_dev, "Error %d calling ndo_bpf\n", err);
			goto err;
		}
		if (prog)
			bpf_prog_inc(prog);
	}

	if (prog) {
		static_branch_inc(&bpf_master_redirect_enabled_key);
	} else if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&bpf_master_redirect_enabled_key);
	}

	return 0;

err:
	/* unwind the program changes */
	bond->xdp_prog = old_prog;
	xdp.prog = old_prog;
	xdp.extack = NULL; /* do not overwrite original error */

	bond_for_each_slave(bond, rollback_slave, iter) {
		struct net_device *slave_dev = rollback_slave->dev;
		int err_unwind;

		if (slave == rollback_slave)
			break;

		err_unwind = slave_dev->netdev_ops->ndo_bpf(slave_dev, &xdp);
		if (err_unwind < 0)
			slave_err(dev, slave_dev,
				  "Error %d when unwinding XDP program change\n", err_unwind);
		else if (xdp.prog)
			bpf_prog_inc(xdp.prog);
	}
	return err;
}

static int bond_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return bond_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

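/* In broadcast mode every packet is replicated to all slaves, so the
 * usable link speed is the minimum across the active slaves rather
 * than their sum.
 */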
static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
{
	if (speed == 0 || speed == SPEED_UNKNOWN)
		speed = slave->speed;
	else
		speed = min(speed, slave->speed);

	return speed;
}

static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
					   struct ethtool_link_ksettings *cmd)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct list_head *iter;
	struct slave *slave;
	u32 speed = 0;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;

	/* Since bond_slave_can_tx returns false for all inactive or down
	 * slaves, we do not need to check the mode.  Though link speed
	 * might not represent the true receive or transmit bandwidth
	 * (not all modes are symmetric), this is an accurate maximum.
	 */
	bond_for_each_slave(bond, slave, iter) {
		if (bond_slave_can_tx(slave)) {
			if (slave->speed != SPEED_UNKNOWN) {
				if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
					speed = bond_mode_bcast_speed(slave,
								      speed);
				else
					speed += slave->speed;
			}
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
			    slave->duplex != DUPLEX_UNKNOWN)
				cmd->base.duplex = slave->duplex;
		}
	}
	cmd->base.speed = speed ? : SPEED_UNKNOWN;

	return 0;
}

static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
		 BOND_ABI_VERSION);
}

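/* Report timestamping capabilities.  If an active slave exists, its
 * PHY or driver answers directly; otherwise software TX timestamping
 * is advertised only when every slave supports it.
 */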
static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
				    struct ethtool_ts_info *info)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct ethtool_ts_info ts_info;
	const struct ethtool_ops *ops;
	struct net_device *real_dev;
	bool sw_tx_support = false;
	struct phy_device *phydev;
	struct list_head *iter;
	struct slave *slave;
	int ret = 0;

	rcu_read_lock();
	real_dev = bond_option_active_slave_get_rcu(bond);
	dev_hold(real_dev);
	rcu_read_unlock();

	if (real_dev) {
		ops = real_dev->ethtool_ops;
		phydev = real_dev->phydev;

		if (phy_has_tsinfo(phydev)) {
			ret = phy_ts_info(phydev, info);
			goto out;
		} else if (ops->get_ts_info) {
			ret = ops->get_ts_info(real_dev, info);
			goto out;
		}
	} else {
		/* Check if all slaves support software tx timestamping */
		rcu_read_lock();
		bond_for_each_slave_rcu(bond, slave, iter) {
			ret = -1;
			ops = slave->dev->ethtool_ops;
			phydev = slave->dev->phydev;

			if (phy_has_tsinfo(phydev))
				ret = phy_ts_info(phydev, &ts_info);
			else if (ops->get_ts_info)
				ret = ops->get_ts_info(slave->dev, &ts_info);

			if (!ret && (ts_info.so_timestamping & SOF_TIMESTAMPING_TX_SOFTWARE)) {
				sw_tx_support = true;
				continue;
			}

			sw_tx_support = false;
			break;
		}
		rcu_read_unlock();
	}

	ret = 0;
	info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	if (sw_tx_support)
		info->so_timestamping |= SOF_TIMESTAMPING_TX_SOFTWARE;

	info->phc_index = -1;

out:
	dev_put(real_dev);
	return ret;
}

static const struct ethtool_ops bond_ethtool_ops = {
	.get_drvinfo		= bond_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= bond_ethtool_get_link_ksettings,
	.get_ts_info		= bond_ethtool_get_ts_info,
};

static const struct net_device_ops bond_netdev_ops = {
	.ndo_init		= bond_init,
	.ndo_uninit		= bond_uninit,
	.ndo_open		= bond_open,
	.ndo_stop		= bond_close,
	.ndo_start_xmit		= bond_start_xmit,
	.ndo_select_queue	= bond_select_queue,
	.ndo_get_stats64	= bond_get_stats,
	.ndo_eth_ioctl		= bond_eth_ioctl,
	.ndo_siocbond		= bond_do_ioctl,
	.ndo_siocdevprivate	= bond_siocdevprivate,
	.ndo_change_rx_flags	= bond_change_rx_flags,
	.ndo_set_rx_mode	= bond_set_rx_mode,
	.ndo_change_mtu		= bond_change_mtu,
	.ndo_set_mac_address	= bond_set_mac_address,
	.ndo_neigh_setup	= bond_neigh_setup,
	.ndo_vlan_rx_add_vid	= bond_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	= bond_netpoll_setup,
	.ndo_netpoll_cleanup	= bond_netpoll_cleanup,
	.ndo_poll_controller	= bond_poll_controller,
#endif
	.ndo_add_slave		= bond_enslave,
	.ndo_del_slave		= bond_release,
	.ndo_fix_features	= bond_fix_features,
	.ndo_features_check	= passthru_features_check,
	.ndo_get_xmit_slave	= bond_xmit_get_slave,
	.ndo_sk_get_lower_dev	= bond_sk_get_lower_dev,
	.ndo_bpf		= bond_xdp,
	.ndo_xdp_xmit		= bond_xdp_xmit,
	.ndo_xdp_get_xmit_slave	= bond_xdp_get_xmit_slave,
};

static const struct device_type bond_type = {
	.name = "bond",
};

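/* priv_destructor: free per-bond resources once the netdev itself is
 * being destroyed.
 */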
static void bond_destructor(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	if (bond->wq)
		destroy_workqueue(bond->wq);

	if (bond->rr_tx_counter)
		free_percpu(bond->rr_tx_counter);
}

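/* Set up a freshly allocated bond device: locks, default parameters,
 * netdev/ethtool ops, and the feature flags the bond advertises.
 */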
void bond_setup(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);

	spin_lock_init(&bond->mode_lock);
	bond->params = bonding_defaults;

	/* Initialize pointers */
	bond->dev = bond_dev;

	/* Initialize the device entry points */
	ether_setup(bond_dev);
	bond_dev->max_mtu = ETH_MAX_MTU;
	bond_dev->netdev_ops = &bond_netdev_ops;
	bond_dev->ethtool_ops = &bond_ethtool_ops;

	bond_dev->needs_free_netdev = true;
	bond_dev->priv_destructor = bond_destructor;

	SET_NETDEV_DEVTYPE(bond_dev, &bond_type);

	/* Initialize the device options */
	bond_dev->flags |= IFF_MASTER;
	bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT | IFF_NO_QUEUE;
	bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

#ifdef CONFIG_XFRM_OFFLOAD
	/* set up xfrm device ops (only supported in active-backup right now) */
	bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
	INIT_LIST_HEAD(&bond->ipsec_list);
	spin_lock_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */

	/* don't acquire bond device's netif_tx_lock when transmitting */
	bond_dev->features |= NETIF_F_LLTX;

	/* By default, we declare the bond to be fully VLAN hardware
	 * accelerated capable. Special care is taken in the various
	 * xmit functions when there are slaves that are not hw accel
	 * capable.
	 */

	/* Don't allow bond devices to change network namespaces. */
	bond_dev->features |= NETIF_F_NETNS_LOCAL;

	bond_dev->hw_features = BOND_VLAN_FEATURES |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_HW_VLAN_CTAG_FILTER;

	bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
	bond_dev->features |= bond_dev->hw_features;
	bond_dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
#ifdef CONFIG_XFRM_OFFLOAD
	bond_dev->hw_features |= BOND_XFRM_FEATURES;
	/* Only enable XFRM features if this is an active-backup config */
	if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP)
		bond_dev->features |= BOND_XFRM_FEATURES;
#endif /* CONFIG_XFRM_OFFLOAD */

	if (bond_xdp_check(bond))
		bond_dev->xdp_features = NETDEV_XDP_ACT_MASK;
}

/* Destroy a bonding device.
 * Must be under rtnl_lock when this function is called.
 */
static void bond_uninit(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_up_slave *usable, *all;
	struct list_head *iter;
	struct slave *slave;

	bond_netpoll_cleanup(bond_dev);

	/* Release the bonded slaves */
	bond_for_each_slave(bond, slave, iter)
		__bond_release_one(bond_dev, slave->dev, true, true);
	netdev_info(bond_dev, "Released all slaves\n");

	usable = rtnl_dereference(bond->usable_slaves);
	if (usable) {
		RCU_INIT_POINTER(bond->usable_slaves, NULL);
		kfree_rcu(usable, rcu);
	}

	all = rtnl_dereference(bond->all_slaves);
	if (all) {
		RCU_INIT_POINTER(bond->all_slaves, NULL);
		kfree_rcu(all, rcu);
	}

	list_del(&bond->bond_list);

	bond_debug_unregister(bond);
}

/*------------------------- Module initialization ---------------------------*/

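/* Validate and normalize the module parameters declared above, and
 * fill @params with the resulting defaults for newly created bonds.
 * Out-of-range numeric values are clamped with a warning; invalid
 * string values are fatal.  As a rough illustration (hypothetical
 * values, not a recommendation), loading the module with:
 *
 *	modprobe bonding mode=802.3ad miimon=100 lacp_rate=fast
 *
 * would be parsed here into BOND_MODE_8023AD, a 100 ms MII monitor
 * interval, and the fast LACP rate for every bond the driver creates.
 */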
static int bond_check_params(struct bond_params *params)
{
	int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
	struct bond_opt_value newval;
	const struct bond_opt_value *valptr;
	int arp_all_targets_value = 0;
	u16 ad_actor_sys_prio = 0;
	u16 ad_user_port_key = 0;
	__be32 arp_target[BOND_MAX_ARP_TARGETS] = { 0 };
	int arp_ip_count;
	int bond_mode = BOND_MODE_ROUNDROBIN;
	int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
	int lacp_fast = 0;
	int tlb_dynamic_lb;

	/* Convert string parameters. */
	if (mode) {
		bond_opt_initstr(&newval, mode);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_MODE), &newval);
		if (!valptr) {
			pr_err("Error: Invalid bonding mode \"%s\"\n", mode);
			return -EINVAL;
		}
		bond_mode = valptr->value;
	}

	if (xmit_hash_policy) {
		if (bond_mode == BOND_MODE_ROUNDROBIN ||
		    bond_mode == BOND_MODE_ACTIVEBACKUP ||
		    bond_mode == BOND_MODE_BROADCAST) {
			pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
		} else {
			bond_opt_initstr(&newval, xmit_hash_policy);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
						&newval);
			if (!valptr) {
				pr_err("Error: Invalid xmit_hash_policy \"%s\"\n",
				       xmit_hash_policy);
				return -EINVAL;
			}
			xmit_hashtype = valptr->value;
		}
	}

	if (lacp_rate) {
		if (bond_mode != BOND_MODE_8023AD) {
			pr_info("lacp_rate param is irrelevant in mode %s\n",
				bond_mode_name(bond_mode));
		} else {
			bond_opt_initstr(&newval, lacp_rate);
			valptr = bond_opt_parse(bond_opt_get(BOND_OPT_LACP_RATE),
						&newval);
			if (!valptr) {
				pr_err("Error: Invalid lacp rate \"%s\"\n",
				       lacp_rate);
				return -EINVAL;
			}
			lacp_fast = valptr->value;
		}
	}

	if (ad_select) {
		bond_opt_initstr(&newval, ad_select);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid ad_select \"%s\"\n", ad_select);
			return -EINVAL;
		}
		params->ad_select = valptr->value;
		if (bond_mode != BOND_MODE_8023AD)
			pr_warn("ad_select param only affects 802.3ad mode\n");
	} else {
		params->ad_select = BOND_AD_STABLE;
	}

	if (max_bonds < 0) {
		pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
			max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
		max_bonds = BOND_DEFAULT_MAX_BONDS;
	}

	if (miimon < 0) {
		pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			miimon, INT_MAX);
		miimon = 0;
	}

	if (updelay < 0) {
		pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			updelay, INT_MAX);
		updelay = 0;
	}

	if (downdelay < 0) {
		pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			downdelay, INT_MAX);
		downdelay = 0;
	}

	if ((use_carrier != 0) && (use_carrier != 1)) {
		pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
			use_carrier);
		use_carrier = 1;
	}

	if (num_peer_notif < 0 || num_peer_notif > 255) {
		pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
			num_peer_notif);
		num_peer_notif = 1;
	}

	/* reset values for 802.3ad/TLB/ALB */
	if (!bond_mode_uses_arp(bond_mode)) {
		if (!miimon) {
			pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed, and duplex, which are essential for 802.3ad operation\n");
			pr_warn("Forcing miimon to 100msec\n");
			miimon = BOND_DEFAULT_MIIMON;
		}
	}

	if (tx_queues < 1 || tx_queues > 255) {
		pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
			tx_queues, BOND_DEFAULT_TX_QUEUES);
		tx_queues = BOND_DEFAULT_TX_QUEUES;
	}

	if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
		pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
			all_slaves_active);
		all_slaves_active = 0;
	}

	if (resend_igmp < 0 || resend_igmp > 255) {
		pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
			resend_igmp, BOND_DEFAULT_RESEND_IGMP);
		resend_igmp = BOND_DEFAULT_RESEND_IGMP;
	}

	bond_opt_initval(&newval, packets_per_slave);
	if (!bond_opt_parse(bond_opt_get(BOND_OPT_PACKETS_PER_SLAVE), &newval)) {
		pr_warn("Warning: packets_per_slave (%d) should be between 0 and %u, resetting to 1\n",
			packets_per_slave, USHRT_MAX);
		packets_per_slave = 1;
	}

	if (bond_mode == BOND_MODE_ALB) {
		pr_notice("In ALB mode you might experience client disconnections upon reconnection of a link if the bonding module updelay parameter (%d msec) is incompatible with the forwarding delay time of the switch\n",
			  updelay);
	}

	if (!miimon) {
		if (updelay || downdelay) {
			/* just warn the user the up/down delay will have
			 * no effect since miimon is zero...
			 */
			pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
				updelay, downdelay);
		}
	} else {
		/* don't allow arp monitoring */
		if (arp_interval) {
			pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
				miimon, arp_interval);
			arp_interval = 0;
		}

		if ((updelay % miimon) != 0) {
			pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
				updelay, miimon, (updelay / miimon) * miimon);
		}

		updelay /= miimon;

		if ((downdelay % miimon) != 0) {
			pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
				downdelay, miimon,
				(downdelay / miimon) * miimon);
		}

		downdelay /= miimon;
	}

	if (arp_interval < 0) {
		pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
			arp_interval, INT_MAX);
		arp_interval = 0;
	}

	for (arp_ip_count = 0, i = 0;
	     (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
		__be32 ip;

		/* not a complete check, but good enough to catch mistakes */
		if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
		    !bond_is_ip_target_ok(ip)) {
			pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
				arp_ip_target[i]);
			arp_interval = 0;
		} else {
			if (bond_get_targets_ip(arp_target, ip) == -1)
				arp_target[arp_ip_count++] = ip;
			else
				pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
					&ip);
		}
	}

	if (arp_interval && !arp_ip_count) {
		/* don't allow arping if no arp_ip_target given... */
		pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
			arp_interval);
		arp_interval = 0;
	}

	if (arp_validate) {
		if (!arp_interval) {
			pr_err("arp_validate requires arp_interval\n");
			return -EINVAL;
		}

		bond_opt_initstr(&newval, arp_validate);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_VALIDATE),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid arp_validate \"%s\"\n",
			       arp_validate);
			return -EINVAL;
		}
		arp_validate_value = valptr->value;
	} else {
		arp_validate_value = 0;
	}

	if (arp_all_targets) {
		bond_opt_initstr(&newval, arp_all_targets);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_ARP_ALL_TARGETS),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid arp_all_targets_value \"%s\"\n",
			       arp_all_targets);
			arp_all_targets_value = 0;
		} else {
			arp_all_targets_value = valptr->value;
		}
	}

	if (miimon) {
		pr_info("MII link monitoring set to %d ms\n", miimon);
	} else if (arp_interval) {
		valptr = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
					  arp_validate_value);
		pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
			arp_interval, valptr->string, arp_ip_count);

		for (i = 0; i < arp_ip_count; i++)
			pr_cont(" %s", arp_ip_target[i]);

		pr_cont("\n");

	} else if (max_bonds) {
		/* miimon and arp_interval not set, we need one so things
		 * work as expected, see bonding.txt for details
		 */
		pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! See bonding.txt for details\n");
	}

	if (primary && !bond_mode_uses_primary(bond_mode)) {
		/* currently, using a primary only makes sense
		 * in active-backup, TLB, or ALB modes
		 */
		pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
			primary, bond_mode_name(bond_mode));
		primary = NULL;
	}

	if (primary && primary_reselect) {
		bond_opt_initstr(&newval, primary_reselect);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_PRIMARY_RESELECT),
					&newval);
		if (!valptr) {
			pr_err("Error: Invalid primary_reselect \"%s\"\n",
			       primary_reselect);
			return -EINVAL;
		}
		primary_reselect_value = valptr->value;
	} else {
		primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
	}

	if (fail_over_mac) {
		bond_opt_initstr(&newval, fail_over_mac);
		valptr = bond_opt_parse(bond_opt_get(BOND_OPT_FAIL_OVER_MAC),
					&newval);
		if (!valptr) {
			pr_err("Error: invalid fail_over_mac \"%s\"\n",
			       fail_over_mac);
			return -EINVAL;
		}
		fail_over_mac_value = valptr->value;
		if (bond_mode != BOND_MODE_ACTIVEBACKUP)
			pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
	} else {
		fail_over_mac_value = BOND_FOM_NONE;
	}

	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_ACTOR_SYS_PRIO),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_actor_sys_prio default value\n");
		return -EINVAL;
	}
	ad_actor_sys_prio = valptr->value;

	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_USER_PORT_KEY),
				&newval);
	if (!valptr) {
		pr_err("Error: No ad_user_port_key default value\n");
		return -EINVAL;
	}
	ad_user_port_key = valptr->value;

	bond_opt_initstr(&newval, "default");
	valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval);
	if (!valptr) {
		pr_err("Error: No tlb_dynamic_lb default value\n");
		return -EINVAL;
	}
	tlb_dynamic_lb = valptr->value;

	if (lp_interval == 0) {
		pr_warn("Warning: lp_interval must be between 1 and %d, so it was reset to %d\n",
			INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
		lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
	}

	/* fill params struct with the proper values */
	params->mode = bond_mode;
	params->xmit_policy = xmit_hashtype;
	params->miimon = miimon;
	params->num_peer_notif = num_peer_notif;
	params->arp_interval = arp_interval;
	params->arp_validate = arp_validate_value;
	params->arp_all_targets = arp_all_targets_value;
	params->missed_max = 2;
	params->updelay = updelay;
	params->downdelay = downdelay;
	params->peer_notif_delay = 0;
	params->use_carrier = use_carrier;
	params->lacp_active = 1;
	params->lacp_fast = lacp_fast;
	params->primary[0] = 0;
	params->primary_reselect = primary_reselect_value;
	params->fail_over_mac = fail_over_mac_value;
	params->tx_queues = tx_queues;
	params->all_slaves_active = all_slaves_active;
	params->resend_igmp = resend_igmp;
	params->min_links = min_links;
	params->lp_interval = lp_interval;
	params->packets_per_slave = packets_per_slave;
	params->tlb_dynamic_lb = tlb_dynamic_lb;
	params->ad_actor_sys_prio = ad_actor_sys_prio;
	eth_zero_addr(params->ad_actor_system);
	params->ad_user_port_key = ad_user_port_key;
	if (packets_per_slave > 0) {
		params->reciprocal_packets_per_slave =
			reciprocal_value(packets_per_slave);
	} else {
		/* reciprocal_packets_per_slave is unused if
		 * packets_per_slave is 0 or 1, just initialize it
		 */
		params->reciprocal_packets_per_slave =
			(struct reciprocal_value) { 0 };
	}

	if (primary)
		strscpy_pad(params->primary, primary, sizeof(params->primary));

	memcpy(params->arp_targets, arp_target, sizeof(arp_target));
#if IS_ENABLED(CONFIG_IPV6)
	memset(params->ns_targets, 0, sizeof(struct in6_addr) * BOND_MAX_NS_TARGETS);
#endif

	return 0;
}

/* Called from registration process */
static int bond_init(struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);

	netdev_dbg(bond_dev, "Begin bond_init\n");

	bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
	if (!bond->wq)
		return -ENOMEM;

	bond->notifier_ctx = false;

	spin_lock_init(&bond->stats_lock);
	netdev_lockdep_set_classes(bond_dev);

	list_add_tail(&bond->bond_list, &bn->dev_list);

	bond_prepare_sysfs_group(bond);

	bond_debug_register(bond);

	/* Ensure valid dev_addr */
	if (is_zero_ether_addr(bond_dev->dev_addr) &&
	    bond_dev->addr_assign_type == NET_ADDR_PERM)
		eth_hw_addr_random(bond_dev);

	return 0;
}

unsigned int bond_get_num_tx_queues(void)
{
	return tx_queues;
}

/* Create a new bond based on the specified name and bonding parameters.
 * If name is NULL, obtain a suitable "bond%d" name for us.
 * Caller must NOT hold rtnl_lock; we need to release it here before we
 * set up our sysfs entries.
 */
int bond_create(struct net *net, const char *name)
{
	struct net_device *bond_dev;
	struct bonding *bond;
	int res = -ENOMEM;

	rtnl_lock();

	bond_dev = alloc_netdev_mq(sizeof(struct bonding),
				   name ? name : "bond%d", NET_NAME_UNKNOWN,
				   bond_setup, tx_queues);
	if (!bond_dev)
		goto out;

	bond = netdev_priv(bond_dev);
	dev_net_set(bond_dev, net);
	bond_dev->rtnl_link_ops = &bond_link_ops;

	res = register_netdevice(bond_dev);
	if (res < 0) {
		free_netdev(bond_dev);
		goto out;
	}

	netif_carrier_off(bond_dev);

	bond_work_init_all(bond);

out:
	rtnl_unlock();
	return res;
}

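/* Per-network-namespace init: each netns gets its own list of bonds
 * plus its own procfs directory and sysfs entries.
 */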
static int __net_init bond_net_init(struct net *net)
{
	struct bond_net *bn = net_generic(net, bond_net_id);

	bn->net = net;
	INIT_LIST_HEAD(&bn->dev_list);

	bond_create_proc_dir(bn);
	bond_create_sysfs(bn);

	return 0;
}

static void __net_exit bond_net_exit_batch(struct list_head *net_list)
{
	struct bond_net *bn;
	struct net *net;
	LIST_HEAD(list);

	list_for_each_entry(net, net_list, exit_list) {
		bn = net_generic(net, bond_net_id);
		bond_destroy_sysfs(bn);
	}

	/* Kill off any bonds created after unregistering bond rtnl ops */
	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct bonding *bond, *tmp_bond;

		bn = net_generic(net, bond_net_id);
		list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
			unregister_netdevice_queue(bond->dev, &list);
	}
	unregister_netdevice_many(&list);
	rtnl_unlock();

	list_for_each_entry(net, net_list, exit_list) {
		bn = net_generic(net, bond_net_id);
		bond_destroy_proc_dir(bn);
	}
}

static struct pernet_operations bond_net_ops = {
	.init = bond_net_init,
	.exit_batch = bond_net_exit_batch,
	.id   = &bond_net_id,
	.size = sizeof(struct bond_net),
};

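/* Module entry point.  Registration order matters: parameters are
 * validated first, then the pernet subsystem, netlink ops, debugfs,
 * the max_bonds default devices, the flow dissector used for hashing,
 * and finally the netdev notifier.  The error path unwinds in reverse.
 */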
static int __init bonding_init(void)
{
	int i;
	int res;

	res = bond_check_params(&bonding_defaults);
	if (res)
		goto out;

	res = register_pernet_subsys(&bond_net_ops);
	if (res)
		goto out;

	res = bond_netlink_init();
	if (res)
		goto err_link;

	bond_create_debugfs();

	for (i = 0; i < max_bonds; i++) {
		res = bond_create(&init_net, NULL);
		if (res)
			goto err;
	}

	skb_flow_dissector_init(&flow_keys_bonding,
				flow_keys_bonding_keys,
				ARRAY_SIZE(flow_keys_bonding_keys));

	register_netdevice_notifier(&bond_netdev_notifier);
out:
	return res;
err:
	bond_destroy_debugfs();
	bond_netlink_fini();
err_link:
	unregister_pernet_subsys(&bond_net_ops);
	goto out;
}

static void __exit bonding_exit(void)
{
	unregister_netdevice_notifier(&bond_netdev_notifier);

	bond_destroy_debugfs();

	bond_netlink_fini();
	unregister_pernet_subsys(&bond_net_ops);

#ifdef CONFIG_NET_POLL_CONTROLLER
	/* Make sure we don't have an imbalance on our netpoll blocking */
	WARN_ON(atomic_read(&netpoll_block_tx));
#endif
}

module_init(bonding_init);
module_exit(bonding_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");