Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'ipv6-devconf-lockless'

Eric Dumazet says:

====================
ipv6: lockless accesses to devconf

- The first patch groups the fields used in fast paths into a cacheline_group.

- Subsequent patches annotate all data races around idev->cnf fields.

- Last patch in this series removes RTNL use for RTM_GETNETCONF dumps.

v3: addressed Jakub Kicinski feedback in addrconf_disable_ipv6()
Added tags from Jiri and Florian.

v2: addressed Jiri Pirko feedback
- Added "ipv6: addrconf_disable_ipv6() optimizations"
and "ipv6: addrconf_disable_policy() optimization"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+245 -229
+1 -1
drivers/net/ethernet/netronome/nfp/flower/action.c
··· 460 460 set_tun->ttl = ip6_dst_hoplimit(dst); 461 461 dst_release(dst); 462 462 } else { 463 - set_tun->ttl = net->ipv6.devconf_all->hop_limit; 463 + set_tun->ttl = READ_ONCE(net->ipv6.devconf_all->hop_limit); 464 464 } 465 465 #endif 466 466 } else {
+1 -1
drivers/net/usb/cdc_mbim.c
··· 339 339 in6_dev = in6_dev_get(netdev); 340 340 if (!in6_dev) 341 341 goto out; 342 - is_router = !!in6_dev->cnf.forwarding; 342 + is_router = !!READ_ONCE(in6_dev->cnf.forwarding); 343 343 in6_dev_put(in6_dev); 344 344 345 345 /* ipv6_stub != NULL if in6_dev_get returned an inet6_dev */
+9 -4
include/linux/ipv6.h
··· 3 3 #define _IPV6_H 4 4 5 5 #include <uapi/linux/ipv6.h> 6 + #include <linux/cache.h> 6 7 7 8 #define ipv6_optlen(p) (((p)->hdrlen+1) << 3) 8 9 #define ipv6_authlen(p) (((p)->hdrlen+2) << 2) ··· 11 10 * This structure contains configuration options per IPv6 link. 12 11 */ 13 12 struct ipv6_devconf { 14 - __s32 forwarding; 13 + /* RX & TX fastpath fields. */ 14 + __cacheline_group_begin(ipv6_devconf_read_txrx); 15 + __s32 disable_ipv6; 15 16 __s32 hop_limit; 16 17 __s32 mtu6; 18 + __s32 forwarding; 19 + __s32 disable_policy; 20 + __s32 proxy_ndp; 21 + __cacheline_group_end(ipv6_devconf_read_txrx); 22 + 17 23 __s32 accept_ra; 18 24 __s32 accept_redirects; 19 25 __s32 autoconf; ··· 53 45 __s32 accept_ra_rt_info_max_plen; 54 46 #endif 55 47 #endif 56 - __s32 proxy_ndp; 57 48 __s32 accept_source_route; 58 49 __s32 accept_ra_from_local; 59 50 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD ··· 62 55 #ifdef CONFIG_IPV6_MROUTE 63 56 atomic_t mc_forwarding; 64 57 #endif 65 - __s32 disable_ipv6; 66 58 __s32 drop_unicast_in_l2_multicast; 67 59 __s32 accept_dad; 68 60 __s32 force_tllao; ··· 82 76 #endif 83 77 __u32 enhanced_dad; 84 78 __u32 addr_gen_mode; 85 - __s32 disable_policy; 86 79 __s32 ndisc_tclass; 87 80 __s32 rpl_seg_enabled; 88 81 __u32 ioam6_id;
+1 -1
include/net/addrconf.h
··· 417 417 if (unlikely(!idev)) 418 418 return true; 419 419 420 - return !!idev->cnf.ignore_routes_with_linkdown; 420 + return !!READ_ONCE(idev->cnf.ignore_routes_with_linkdown); 421 421 } 422 422 423 423 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
+1 -1
include/net/ip6_route.h
··· 332 332 rcu_read_lock(); 333 333 idev = __in6_dev_get(dst->dev); 334 334 if (idev) 335 - mtu = idev->cnf.mtu6; 335 + mtu = READ_ONCE(idev->cnf.mtu6); 336 336 rcu_read_unlock(); 337 337 338 338 out:
+5 -3
include/net/ipv6.h
··· 534 534 return 0; 535 535 } 536 536 537 - static inline bool ipv6_accept_ra(struct inet6_dev *idev) 537 + static inline bool ipv6_accept_ra(const struct inet6_dev *idev) 538 538 { 539 + s32 accept_ra = READ_ONCE(idev->cnf.accept_ra); 540 + 539 541 /* If forwarding is enabled, RA are not accepted unless the special 540 542 * hybrid mode (accept_ra=2) is enabled. 541 543 */ 542 - return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 : 543 - idev->cnf.accept_ra; 544 + return READ_ONCE(idev->cnf.forwarding) ? accept_ra == 2 : 545 + accept_ra; 544 546 } 545 547 546 548 #define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
+1 -1
net/core/filter.c
··· 5988 5988 return -ENODEV; 5989 5989 5990 5990 idev = __in6_dev_get_safely(dev); 5991 - if (unlikely(!idev || !idev->cnf.forwarding)) 5991 + if (unlikely(!idev || !READ_ONCE(idev->cnf.forwarding))) 5992 5992 return BPF_FIB_LKUP_RET_FWD_DISABLED; 5993 5993 5994 5994 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
+139 -141
net/ipv6/addrconf.c
··· 551 551 goto out; 552 552 553 553 if ((all || type == NETCONFA_FORWARDING) && 554 - nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0) 554 + nla_put_s32(skb, NETCONFA_FORWARDING, 555 + READ_ONCE(devconf->forwarding)) < 0) 555 556 goto nla_put_failure; 556 557 #ifdef CONFIG_IPV6_MROUTE 557 558 if ((all || type == NETCONFA_MC_FORWARDING) && ··· 561 560 goto nla_put_failure; 562 561 #endif 563 562 if ((all || type == NETCONFA_PROXY_NEIGH) && 564 - nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0) 563 + nla_put_s32(skb, NETCONFA_PROXY_NEIGH, 564 + READ_ONCE(devconf->proxy_ndp)) < 0) 565 565 goto nla_put_failure; 566 566 567 567 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) && 568 568 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN, 569 - devconf->ignore_routes_with_linkdown) < 0) 569 + READ_ONCE(devconf->ignore_routes_with_linkdown)) < 0) 570 570 goto nla_put_failure; 571 571 572 572 out: ··· 727 725 return res; 728 726 } 729 727 730 - 731 728 static int inet6_netconf_dump_devconf(struct sk_buff *skb, 732 729 struct netlink_callback *cb) 733 730 { 734 731 const struct nlmsghdr *nlh = cb->nlh; 735 732 struct net *net = sock_net(skb->sk); 736 - int h, s_h; 737 - int idx, s_idx; 733 + struct { 734 + unsigned long ifindex; 735 + unsigned int all_default; 736 + } *ctx = (void *)cb->ctx; 738 737 struct net_device *dev; 739 738 struct inet6_dev *idev; 740 - struct hlist_head *head; 739 + int err = 0; 741 740 742 741 if (cb->strict_check) { 743 742 struct netlink_ext_ack *extack = cb->extack; ··· 755 752 } 756 753 } 757 754 758 - s_h = cb->args[0]; 759 - s_idx = idx = cb->args[1]; 760 - 761 - for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 762 - idx = 0; 763 - head = &net->dev_index_head[h]; 764 - rcu_read_lock(); 765 - cb->seq = inet6_base_seq(net); 766 - hlist_for_each_entry_rcu(dev, head, index_hlist) { 767 - if (idx < s_idx) 768 - goto cont; 769 - idev = __in6_dev_get(dev); 770 - if (!idev) 771 - goto cont; 772 - 773 
- if (inet6_netconf_fill_devconf(skb, dev->ifindex, 774 - &idev->cnf, 775 - NETLINK_CB(cb->skb).portid, 776 - nlh->nlmsg_seq, 777 - RTM_NEWNETCONF, 778 - NLM_F_MULTI, 779 - NETCONFA_ALL) < 0) { 780 - rcu_read_unlock(); 781 - goto done; 782 - } 783 - nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 784 - cont: 785 - idx++; 786 - } 787 - rcu_read_unlock(); 788 - } 789 - if (h == NETDEV_HASHENTRIES) { 790 - if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, 791 - net->ipv6.devconf_all, 792 - NETLINK_CB(cb->skb).portid, 793 - nlh->nlmsg_seq, 794 - RTM_NEWNETCONF, NLM_F_MULTI, 795 - NETCONFA_ALL) < 0) 755 + rcu_read_lock(); 756 + for_each_netdev_dump(net, dev, ctx->ifindex) { 757 + idev = __in6_dev_get(dev); 758 + if (!idev) 759 + continue; 760 + err = inet6_netconf_fill_devconf(skb, dev->ifindex, 761 + &idev->cnf, 762 + NETLINK_CB(cb->skb).portid, 763 + nlh->nlmsg_seq, 764 + RTM_NEWNETCONF, 765 + NLM_F_MULTI, 766 + NETCONFA_ALL); 767 + if (err < 0) 796 768 goto done; 797 - else 798 - h++; 799 769 } 800 - if (h == NETDEV_HASHENTRIES + 1) { 801 - if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, 802 - net->ipv6.devconf_dflt, 803 - NETLINK_CB(cb->skb).portid, 804 - nlh->nlmsg_seq, 805 - RTM_NEWNETCONF, NLM_F_MULTI, 806 - NETCONFA_ALL) < 0) 770 + if (ctx->all_default == 0) { 771 + err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL, 772 + net->ipv6.devconf_all, 773 + NETLINK_CB(cb->skb).portid, 774 + nlh->nlmsg_seq, 775 + RTM_NEWNETCONF, NLM_F_MULTI, 776 + NETCONFA_ALL); 777 + if (err < 0) 807 778 goto done; 808 - else 809 - h++; 779 + ctx->all_default++; 780 + } 781 + if (ctx->all_default == 1) { 782 + err = inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT, 783 + net->ipv6.devconf_dflt, 784 + NETLINK_CB(cb->skb).portid, 785 + nlh->nlmsg_seq, 786 + RTM_NEWNETCONF, NLM_F_MULTI, 787 + NETCONFA_ALL); 788 + if (err < 0) 789 + goto done; 790 + ctx->all_default++; 810 791 } 811 792 done: 812 - cb->args[0] = h; 813 - cb->args[1] = idx; 814 - 815 - 
return skb->len; 793 + if (err < 0 && likely(skb->len)) 794 + err = skb->len; 795 + rcu_read_unlock(); 796 + return err; 816 797 } 817 798 818 799 #ifdef CONFIG_SYSCTL ··· 856 869 idev = __in6_dev_get(dev); 857 870 if (idev) { 858 871 int changed = (!idev->cnf.forwarding) ^ (!newf); 859 - idev->cnf.forwarding = newf; 872 + 873 + WRITE_ONCE(idev->cnf.forwarding, newf); 860 874 if (changed) 861 875 dev_forward_change(idev); 862 876 } ··· 874 886 875 887 net = (struct net *)table->extra2; 876 888 old = *p; 877 - *p = newf; 889 + WRITE_ONCE(*p, newf); 878 890 879 891 if (p == &net->ipv6.devconf_dflt->forwarding) { 880 892 if ((!newf) ^ (!old)) ··· 889 901 if (p == &net->ipv6.devconf_all->forwarding) { 890 902 int old_dflt = net->ipv6.devconf_dflt->forwarding; 891 903 892 - net->ipv6.devconf_dflt->forwarding = newf; 904 + WRITE_ONCE(net->ipv6.devconf_dflt->forwarding, newf); 893 905 if ((!newf) ^ (!old_dflt)) 894 906 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, 895 907 NETCONFA_FORWARDING, ··· 921 933 if (idev) { 922 934 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf); 923 935 924 - idev->cnf.ignore_routes_with_linkdown = newf; 936 + WRITE_ONCE(idev->cnf.ignore_routes_with_linkdown, newf); 925 937 if (changed) 926 938 inet6_netconf_notify_devconf(dev_net(dev), 927 939 RTM_NEWNETCONF, ··· 942 954 943 955 net = (struct net *)table->extra2; 944 956 old = *p; 945 - *p = newf; 957 + WRITE_ONCE(*p, newf); 946 958 947 959 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) { 948 960 if ((!newf) ^ (!old)) ··· 956 968 } 957 969 958 970 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) { 959 - net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf; 971 + WRITE_ONCE(net->ipv6.devconf_dflt->ignore_routes_with_linkdown, newf); 960 972 addrconf_linkdown_change(net, newf); 961 973 if ((!newf) ^ (!old)) 962 974 inet6_netconf_notify_devconf(net, ··· 1344 1356 in6_ifa_put(ifp); 1345 1357 } 1346 1358 1347 - static unsigned long 
ipv6_get_regen_advance(struct inet6_dev *idev) 1359 + static unsigned long ipv6_get_regen_advance(const struct inet6_dev *idev) 1348 1360 { 1349 - return idev->cnf.regen_min_advance + idev->cnf.regen_max_retry * 1350 - idev->cnf.dad_transmits * 1351 - max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ; 1361 + return READ_ONCE(idev->cnf.regen_min_advance) + 1362 + READ_ONCE(idev->cnf.regen_max_retry) * 1363 + READ_ONCE(idev->cnf.dad_transmits) * 1364 + max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ; 1352 1365 } 1353 1366 1354 1367 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block) ··· 1370 1381 1371 1382 retry: 1372 1383 in6_dev_hold(idev); 1373 - if (idev->cnf.use_tempaddr <= 0) { 1384 + if (READ_ONCE(idev->cnf.use_tempaddr) <= 0) { 1374 1385 write_unlock_bh(&idev->lock); 1375 1386 pr_info("%s: use_tempaddr is disabled\n", __func__); 1376 1387 in6_dev_put(idev); ··· 1378 1389 goto out; 1379 1390 } 1380 1391 spin_lock_bh(&ifp->lock); 1381 - if (ifp->regen_count++ >= idev->cnf.regen_max_retry) { 1382 - idev->cnf.use_tempaddr = -1; /*XXX*/ 1392 + if (ifp->regen_count++ >= READ_ONCE(idev->cnf.regen_max_retry)) { 1393 + WRITE_ONCE(idev->cnf.use_tempaddr, -1); /*XXX*/ 1383 1394 spin_unlock_bh(&ifp->lock); 1384 1395 write_unlock_bh(&idev->lock); 1385 1396 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n", ··· 1401 1412 */ 1402 1413 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft); 1403 1414 max_desync_factor = min_t(long, 1404 - idev->cnf.max_desync_factor, 1415 + READ_ONCE(idev->cnf.max_desync_factor), 1405 1416 cnf_temp_preferred_lft - regen_advance); 1406 1417 1407 1418 if (unlikely(idev->desync_factor > max_desync_factor)) { ··· 1418 1429 1419 1430 memset(&cfg, 0, sizeof(cfg)); 1420 1431 cfg.valid_lft = min_t(__u32, ifp->valid_lft, 1421 - idev->cnf.temp_valid_lft + age); 1432 + READ_ONCE(idev->cnf.temp_valid_lft) + age); 1422 1433 cfg.preferred_lft = cnf_temp_preferred_lft + age - 
idev->desync_factor; 1423 1434 cfg.preferred_lft = min_t(__u32, if_public_preferred_lft, cfg.preferred_lft); 1424 1435 cfg.preferred_lft = min_t(__u32, cfg.valid_lft, cfg.preferred_lft); ··· 1542 1553 return 0; 1543 1554 } 1544 1555 1545 - static bool ipv6_use_optimistic_addr(struct net *net, 1546 - struct inet6_dev *idev) 1556 + static bool ipv6_use_optimistic_addr(const struct net *net, 1557 + const struct inet6_dev *idev) 1547 1558 { 1548 1559 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 1549 1560 if (!idev) 1550 1561 return false; 1551 - if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad) 1562 + if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) && 1563 + !READ_ONCE(idev->cnf.optimistic_dad)) 1552 1564 return false; 1553 - if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic) 1565 + if (!READ_ONCE(net->ipv6.devconf_all->use_optimistic) && 1566 + !READ_ONCE(idev->cnf.use_optimistic)) 1554 1567 return false; 1555 1568 1556 1569 return true; ··· 1561 1570 #endif 1562 1571 } 1563 1572 1564 - static bool ipv6_allow_optimistic_dad(struct net *net, 1565 - struct inet6_dev *idev) 1573 + static bool ipv6_allow_optimistic_dad(const struct net *net, 1574 + const struct inet6_dev *idev) 1566 1575 { 1567 1576 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 1568 1577 if (!idev) 1569 1578 return false; 1570 - if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad) 1579 + if (!READ_ONCE(net->ipv6.devconf_all->optimistic_dad) && 1580 + !READ_ONCE(idev->cnf.optimistic_dad)) 1571 1581 return false; 1572 1582 1573 1583 return true; ··· 1674 1682 */ 1675 1683 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ? 
1676 1684 !!(dst->prefs & IPV6_PREFER_SRC_TMP) : 1677 - score->ifa->idev->cnf.use_tempaddr >= 2; 1685 + READ_ONCE(score->ifa->idev->cnf.use_tempaddr) >= 2; 1678 1686 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp; 1679 1687 break; 1680 1688 } ··· 1850 1858 idev = __in6_dev_get(dst_dev); 1851 1859 if ((dst_type & IPV6_ADDR_MULTICAST) || 1852 1860 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL || 1853 - (idev && idev->cnf.use_oif_addrs_only)) { 1861 + (idev && READ_ONCE(idev->cnf.use_oif_addrs_only))) { 1854 1862 use_oif_addr = true; 1855 1863 } 1856 1864 } ··· 2157 2165 { 2158 2166 struct inet6_dev *idev = ifp->idev; 2159 2167 struct net *net = dev_net(idev->dev); 2168 + int max_addresses; 2160 2169 2161 2170 if (addrconf_dad_end(ifp)) { 2162 2171 in6_ifa_put(ifp); ··· 2195 2202 2196 2203 spin_unlock_bh(&ifp->lock); 2197 2204 2198 - if (idev->cnf.max_addresses && 2199 - ipv6_count_addresses(idev) >= 2200 - idev->cnf.max_addresses) 2205 + max_addresses = READ_ONCE(idev->cnf.max_addresses); 2206 + if (max_addresses && 2207 + ipv6_count_addresses(idev) >= max_addresses) 2201 2208 goto lock_errdad; 2202 2209 2203 2210 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n", ··· 2594 2601 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively. 
2595 2602 */ 2596 2603 age = (now - ift->cstamp) / HZ; 2597 - max_valid = idev->cnf.temp_valid_lft - age; 2604 + max_valid = READ_ONCE(idev->cnf.temp_valid_lft) - age; 2598 2605 if (max_valid < 0) 2599 2606 max_valid = 0; 2600 2607 2601 - max_prefered = idev->cnf.temp_prefered_lft - 2608 + max_prefered = READ_ONCE(idev->cnf.temp_prefered_lft) - 2602 2609 idev->desync_factor - age; 2603 2610 if (max_prefered < 0) 2604 2611 max_prefered = 0; ··· 2631 2638 if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft)) 2632 2639 create = true; 2633 2640 2634 - if (create && idev->cnf.use_tempaddr > 0) { 2641 + if (create && READ_ONCE(idev->cnf.use_tempaddr) > 0) { 2635 2642 /* When a new public address is created as described 2636 2643 * in [ADDRCONF], also create a new temporary address. 2637 2644 */ ··· 2659 2666 int create = 0, update_lft = 0; 2660 2667 2661 2668 if (!ifp && valid_lft) { 2662 - int max_addresses = in6_dev->cnf.max_addresses; 2669 + int max_addresses = READ_ONCE(in6_dev->cnf.max_addresses); 2663 2670 struct ifa6_config cfg = { 2664 2671 .pfx = addr, 2665 2672 .plen = pinfo->prefix_len, ··· 2671 2678 }; 2672 2679 2673 2680 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 2674 - if ((net->ipv6.devconf_all->optimistic_dad || 2675 - in6_dev->cnf.optimistic_dad) && 2681 + if ((READ_ONCE(net->ipv6.devconf_all->optimistic_dad) || 2682 + READ_ONCE(in6_dev->cnf.optimistic_dad)) && 2676 2683 !net->ipv6.devconf_all->forwarding && sllao) 2677 2684 cfg.ifa_flags |= IFA_F_OPTIMISTIC; 2678 2685 #endif ··· 2721 2728 */ 2722 2729 update_lft = !create && stored_lft; 2723 2730 2724 - if (update_lft && !in6_dev->cnf.ra_honor_pio_life) { 2731 + if (update_lft && !READ_ONCE(in6_dev->cnf.ra_honor_pio_life)) { 2725 2732 const u32 minimum_lft = min_t(u32, 2726 2733 stored_lft, MIN_VALID_LIFETIME); 2727 2734 valid_lft = max(valid_lft, minimum_lft); ··· 3305 3312 struct inet6_ifaddr *ifp; 3306 3313 3307 3314 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD 3308 - if 
((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad || 3309 - idev->cnf.optimistic_dad) && 3315 + if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad) || 3316 + READ_ONCE(idev->cnf.optimistic_dad)) && 3310 3317 !dev_net(idev->dev)->ipv6.devconf_all->forwarding) 3311 3318 cfg.ifa_flags |= IFA_F_OPTIMISTIC; 3312 3319 #endif ··· 3664 3671 3665 3672 if (idev) { 3666 3673 rt6_mtu_change(dev, dev->mtu); 3667 - idev->cnf.mtu6 = dev->mtu; 3674 + WRITE_ONCE(idev->cnf.mtu6, dev->mtu); 3668 3675 break; 3669 3676 } 3670 3677 ··· 3756 3763 if (idev->cnf.mtu6 != dev->mtu && 3757 3764 dev->mtu >= IPV6_MIN_MTU) { 3758 3765 rt6_mtu_change(dev, dev->mtu); 3759 - idev->cnf.mtu6 = dev->mtu; 3766 + WRITE_ONCE(idev->cnf.mtu6, dev->mtu); 3760 3767 } 3761 3768 WRITE_ONCE(idev->tstamp, jiffies); 3762 3769 inet6_ifinfo_notify(RTM_NEWLINK, idev); ··· 3878 3885 */ 3879 3886 if (!unregister && !idev->cnf.disable_ipv6) { 3880 3887 /* aggregate the system setting and interface setting */ 3881 - int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down; 3888 + int _keep_addr = READ_ONCE(net->ipv6.devconf_all->keep_addr_on_down); 3882 3889 3883 3890 if (!_keep_addr) 3884 - _keep_addr = idev->cnf.keep_addr_on_down; 3891 + _keep_addr = READ_ONCE(idev->cnf.keep_addr_on_down); 3885 3892 3886 3893 keep_addr = (_keep_addr > 0); 3887 3894 } ··· 4018 4025 struct inet6_dev *idev = from_timer(idev, t, rs_timer); 4019 4026 struct net_device *dev = idev->dev; 4020 4027 struct in6_addr lladdr; 4028 + int rtr_solicits; 4021 4029 4022 4030 write_lock(&idev->lock); 4023 4031 if (idev->dead || !(idev->if_flags & IF_READY)) ··· 4031 4037 if (idev->if_flags & IF_RA_RCVD) 4032 4038 goto out; 4033 4039 4034 - if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) { 4040 + rtr_solicits = READ_ONCE(idev->cnf.rtr_solicits); 4041 + 4042 + if (idev->rs_probes++ < rtr_solicits || rtr_solicits < 0) { 4035 4043 write_unlock(&idev->lock); 4036 4044 if (!ipv6_get_lladdr(dev, &lladdr, 
IFA_F_TENTATIVE)) 4037 4045 ndisc_send_rs(dev, &lladdr, ··· 4043 4047 4044 4048 write_lock(&idev->lock); 4045 4049 idev->rs_interval = rfc3315_s14_backoff_update( 4046 - idev->rs_interval, idev->cnf.rtr_solicit_max_interval); 4050 + idev->rs_interval, 4051 + READ_ONCE(idev->cnf.rtr_solicit_max_interval)); 4047 4052 /* The wait after the last probe can be shorter */ 4048 4053 addrconf_mod_rs_timer(idev, (idev->rs_probes == 4049 - idev->cnf.rtr_solicits) ? 4050 - idev->cnf.rtr_solicit_delay : 4054 + READ_ONCE(idev->cnf.rtr_solicits)) ? 4055 + READ_ONCE(idev->cnf.rtr_solicit_delay) : 4051 4056 idev->rs_interval); 4052 4057 } else { 4053 4058 /* ··· 4069 4072 */ 4070 4073 static void addrconf_dad_kick(struct inet6_ifaddr *ifp) 4071 4074 { 4072 - unsigned long rand_num; 4073 4075 struct inet6_dev *idev = ifp->idev; 4076 + unsigned long rand_num; 4074 4077 u64 nonce; 4075 4078 4076 4079 if (ifp->flags & IFA_F_OPTIMISTIC) 4077 4080 rand_num = 0; 4078 4081 else 4079 - rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ? : 1); 4082 + rand_num = get_random_u32_below( 4083 + READ_ONCE(idev->cnf.rtr_solicit_delay) ? 
: 1); 4080 4084 4081 4085 nonce = 0; 4082 - if (idev->cnf.enhanced_dad || 4083 - dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) { 4086 + if (READ_ONCE(idev->cnf.enhanced_dad) || 4087 + READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad)) { 4084 4088 do 4085 4089 get_random_bytes(&nonce, 6); 4086 4090 while (nonce == 0); 4087 4091 } 4088 4092 ifp->dad_nonce = nonce; 4089 - ifp->dad_probes = idev->cnf.dad_transmits; 4093 + ifp->dad_probes = READ_ONCE(idev->cnf.dad_transmits); 4090 4094 addrconf_mod_dad_work(ifp, rand_num); 4091 4095 } 4092 4096 ··· 4107 4109 4108 4110 net = dev_net(dev); 4109 4111 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || 4110 - (net->ipv6.devconf_all->accept_dad < 1 && 4111 - idev->cnf.accept_dad < 1) || 4112 + (READ_ONCE(net->ipv6.devconf_all->accept_dad) < 1 && 4113 + READ_ONCE(idev->cnf.accept_dad) < 1) || 4112 4114 !(ifp->flags&IFA_F_TENTATIVE) || 4113 4115 ifp->flags & IFA_F_NODAD) { 4114 4116 bool send_na = false; ··· 4200 4202 action = DAD_ABORT; 4201 4203 ifp->state = INET6_IFADDR_STATE_POSTDAD; 4202 4204 4203 - if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 || 4204 - idev->cnf.accept_dad > 1) && 4205 + if ((READ_ONCE(dev_net(idev->dev)->ipv6.devconf_all->accept_dad) > 1 || 4206 + READ_ONCE(idev->cnf.accept_dad) > 1) && 4205 4207 !idev->cnf.disable_ipv6 && 4206 4208 !(ifp->flags & IFA_F_STABLE_PRIVACY)) { 4207 4209 struct in6_addr addr; ··· 4212 4214 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) && 4213 4215 ipv6_addr_equal(&ifp->addr, &addr)) { 4214 4216 /* DAD failed for link-local based on MAC */ 4215 - idev->cnf.disable_ipv6 = 1; 4217 + WRITE_ONCE(idev->cnf.disable_ipv6, 1); 4216 4218 4217 4219 pr_info("%s: IPv6 being disabled!\n", 4218 4220 ifp->idev->dev->name); ··· 4326 4328 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp); 4327 4329 send_rs = send_mld && 4328 4330 ipv6_accept_ra(ifp->idev) && 4329 - ifp->idev->cnf.rtr_solicits != 0 && 4331 + READ_ONCE(ifp->idev->cnf.rtr_solicits) != 0 && 
4330 4332 (dev->flags & IFF_LOOPBACK) == 0 && 4331 4333 (dev->type != ARPHRD_TUNNEL) && 4332 4334 !netif_is_team_port(dev); ··· 4340 4342 4341 4343 /* send unsolicited NA if enabled */ 4342 4344 if (send_na && 4343 - (ifp->idev->cnf.ndisc_notify || 4344 - dev_net(dev)->ipv6.devconf_all->ndisc_notify)) { 4345 + (READ_ONCE(ifp->idev->cnf.ndisc_notify) || 4346 + READ_ONCE(dev_net(dev)->ipv6.devconf_all->ndisc_notify))) { 4345 4347 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr, 4346 4348 /*router=*/ !!ifp->idev->cnf.forwarding, 4347 4349 /*solicited=*/ false, /*override=*/ true, ··· 4361 4363 write_lock_bh(&ifp->idev->lock); 4362 4364 spin_lock(&ifp->lock); 4363 4365 ifp->idev->rs_interval = rfc3315_s14_backoff_init( 4364 - ifp->idev->cnf.rtr_solicit_interval); 4366 + READ_ONCE(ifp->idev->cnf.rtr_solicit_interval)); 4365 4367 ifp->idev->rs_probes = 1; 4366 4368 ifp->idev->if_flags |= IF_RS_SENT; 4367 4369 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval); ··· 5910 5912 return -EINVAL; 5911 5913 } 5912 5914 5913 - if (idev->cnf.rtr_solicits == 0) { 5915 + if (READ_ONCE(idev->cnf.rtr_solicits) == 0) { 5914 5916 NL_SET_ERR_MSG(extack, 5915 5917 "Router solicitation is disabled on device"); 5916 5918 return -EINVAL; ··· 5943 5945 if (update_rs) { 5944 5946 idev->if_flags |= IF_RS_SENT; 5945 5947 idev->rs_interval = rfc3315_s14_backoff_init( 5946 - idev->cnf.rtr_solicit_interval); 5948 + READ_ONCE(idev->cnf.rtr_solicit_interval)); 5947 5949 idev->rs_probes = 1; 5948 5950 addrconf_mod_rs_timer(idev, idev->rs_interval); 5949 5951 } ··· 6387 6389 idev = __in6_dev_get(dev); 6388 6390 if (idev) { 6389 6391 int changed = (!idev->cnf.disable_ipv6) ^ (!newf); 6390 - idev->cnf.disable_ipv6 = newf; 6392 + 6393 + WRITE_ONCE(idev->cnf.disable_ipv6, newf); 6391 6394 if (changed) 6392 6395 dev_disable_change(idev); 6393 6396 } ··· 6397 6398 6398 6399 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) 6399 6400 { 6400 - struct net *net; 6401 + 
struct net *net = (struct net *)table->extra2; 6401 6402 int old; 6403 + 6404 + if (p == &net->ipv6.devconf_dflt->disable_ipv6) { 6405 + WRITE_ONCE(*p, newf); 6406 + return 0; 6407 + } 6402 6408 6403 6409 if (!rtnl_trylock()) 6404 6410 return restart_syscall(); 6405 6411 6406 - net = (struct net *)table->extra2; 6407 6412 old = *p; 6408 - *p = newf; 6409 - 6410 - if (p == &net->ipv6.devconf_dflt->disable_ipv6) { 6411 - rtnl_unlock(); 6412 - return 0; 6413 - } 6413 + WRITE_ONCE(*p, newf); 6414 6414 6415 6415 if (p == &net->ipv6.devconf_all->disable_ipv6) { 6416 - net->ipv6.devconf_dflt->disable_ipv6 = newf; 6416 + WRITE_ONCE(net->ipv6.devconf_dflt->disable_ipv6, newf); 6417 6417 addrconf_disable_change(net, newf); 6418 6418 } else if ((!newf) ^ (!old)) 6419 6419 dev_disable_change((struct inet6_dev *)table->extra1); ··· 6529 6531 } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) { 6530 6532 struct net_device *dev; 6531 6533 6532 - net->ipv6.devconf_dflt->addr_gen_mode = new_val; 6534 + WRITE_ONCE(net->ipv6.devconf_dflt->addr_gen_mode, new_val); 6533 6535 for_each_netdev(net, dev) { 6534 6536 idev = __in6_dev_get(dev); 6535 6537 if (idev && ··· 6541 6543 } 6542 6544 } 6543 6545 6544 - *((u32 *)ctl->data) = new_val; 6546 + WRITE_ONCE(*((u32 *)ctl->data), new_val); 6545 6547 } 6546 6548 6547 6549 out: ··· 6688 6690 static 6689 6691 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val) 6690 6692 { 6693 + struct net *net = (struct net *)ctl->extra2; 6691 6694 struct inet6_dev *idev; 6692 - struct net *net; 6695 + 6696 + if (valp == &net->ipv6.devconf_dflt->disable_policy) { 6697 + WRITE_ONCE(*valp, val); 6698 + return 0; 6699 + } 6693 6700 6694 6701 if (!rtnl_trylock()) 6695 6702 return restart_syscall(); 6696 6703 6697 - *valp = val; 6698 - 6699 - net = (struct net *)ctl->extra2; 6700 - if (valp == &net->ipv6.devconf_dflt->disable_policy) { 6701 - rtnl_unlock(); 6702 - return 0; 6703 - } 6704 + WRITE_ONCE(*valp, val); 6704 6705 6705 6706 if 
(valp == &net->ipv6.devconf_all->disable_policy) { 6706 6707 struct net_device *dev; ··· 7488 7491 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF, 7489 7492 inet6_netconf_get_devconf, 7490 7493 inet6_netconf_dump_devconf, 7491 - RTNL_FLAG_DOIT_UNLOCKED); 7494 + RTNL_FLAG_DOIT_UNLOCKED | 7495 + RTNL_FLAG_DUMP_UNLOCKED); 7492 7496 if (err < 0) 7493 7497 goto errout; 7494 7498 err = ipv6_addr_label_rtnl_register();
+9 -7
net/ipv6/exthdrs.c
··· 379 379 380 380 idev = __in6_dev_get(skb->dev); 381 381 382 - accept_seg6 = net->ipv6.devconf_all->seg6_enabled; 383 - if (accept_seg6 > idev->cnf.seg6_enabled) 384 - accept_seg6 = idev->cnf.seg6_enabled; 382 + accept_seg6 = min(READ_ONCE(net->ipv6.devconf_all->seg6_enabled), 383 + READ_ONCE(idev->cnf.seg6_enabled)); 385 384 386 385 if (!accept_seg6) { 387 386 kfree_skb(skb); ··· 654 655 struct ipv6_rt_hdr *hdr; 655 656 struct rt0_hdr *rthdr; 656 657 struct net *net = dev_net(skb->dev); 657 - int accept_source_route = net->ipv6.devconf_all->accept_source_route; 658 + int accept_source_route; 658 659 659 - if (idev && accept_source_route > idev->cnf.accept_source_route) 660 - accept_source_route = idev->cnf.accept_source_route; 660 + accept_source_route = READ_ONCE(net->ipv6.devconf_all->accept_source_route); 661 + 662 + if (idev) 663 + accept_source_route = min(accept_source_route, 664 + READ_ONCE(idev->cnf.accept_source_route)); 661 665 662 666 if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) || 663 667 !pskb_may_pull(skb, (skb_transport_offset(skb) + ··· 921 919 goto drop; 922 920 923 921 /* Ignore if IOAM is not enabled on ingress */ 924 - if (!__in6_dev_get(skb->dev)->cnf.ioam6_enabled) 922 + if (!READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_enabled)) 925 923 goto ignore; 926 924 927 925 /* Truncated Option header */
+4 -4
net/ipv6/ioam6.c
··· 727 727 if (!skb->dev) 728 728 raw16 = IOAM6_U16_UNAVAILABLE; 729 729 else 730 - raw16 = (__force u16)__in6_dev_get(skb->dev)->cnf.ioam6_id; 730 + raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id); 731 731 732 732 *(__be16 *)data = cpu_to_be16(raw16); 733 733 data += sizeof(__be16); ··· 735 735 if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) 736 736 raw16 = IOAM6_U16_UNAVAILABLE; 737 737 else 738 - raw16 = (__force u16)__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id; 738 + raw16 = (__force u16)READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id); 739 739 740 740 *(__be16 *)data = cpu_to_be16(raw16); 741 741 data += sizeof(__be16); ··· 822 822 if (!skb->dev) 823 823 raw32 = IOAM6_U32_UNAVAILABLE; 824 824 else 825 - raw32 = __in6_dev_get(skb->dev)->cnf.ioam6_id_wide; 825 + raw32 = READ_ONCE(__in6_dev_get(skb->dev)->cnf.ioam6_id_wide); 826 826 827 827 *(__be32 *)data = cpu_to_be32(raw32); 828 828 data += sizeof(__be32); ··· 830 830 if (skb_dst(skb)->dev->flags & IFF_LOOPBACK) 831 831 raw32 = IOAM6_U32_UNAVAILABLE; 832 832 else 833 - raw32 = __in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide; 833 + raw32 = READ_ONCE(__in6_dev_get(skb_dst(skb)->dev)->cnf.ioam6_id_wide); 834 834 835 835 *(__be32 *)data = cpu_to_be32(raw32); 836 836 data += sizeof(__be32);
+3 -3
net/ipv6/ip6_input.c
··· 168 168 169 169 SKB_DR_SET(reason, NOT_SPECIFIED); 170 170 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || 171 - !idev || unlikely(idev->cnf.disable_ipv6)) { 171 + !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) { 172 172 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); 173 - if (idev && unlikely(idev->cnf.disable_ipv6)) 173 + if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6))) 174 174 SKB_DR_SET(reason, IPV6DISABLED); 175 175 goto drop; 176 176 } ··· 236 236 if (!ipv6_addr_is_multicast(&hdr->daddr) && 237 237 (skb->pkt_type == PACKET_BROADCAST || 238 238 skb->pkt_type == PACKET_MULTICAST) && 239 - idev->cnf.drop_unicast_in_l2_multicast) { 239 + READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) { 240 240 SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST); 241 241 goto err; 242 242 }
+5 -5
net/ipv6/ip6_output.c
··· 234 234 skb->protocol = htons(ETH_P_IPV6); 235 235 skb->dev = dev; 236 236 237 - if (unlikely(idev->cnf.disable_ipv6)) { 237 + if (unlikely(READ_ONCE(idev->cnf.disable_ipv6))) { 238 238 IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS); 239 239 kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED); 240 240 return 0; ··· 501 501 u32 mtu; 502 502 503 503 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif)); 504 - if (net->ipv6.devconf_all->forwarding == 0) 504 + if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0) 505 505 goto error; 506 506 507 507 if (skb->pkt_type != PACKET_HOST) ··· 513 513 if (skb_warn_if_lro(skb)) 514 514 goto drop; 515 515 516 - if (!net->ipv6.devconf_all->disable_policy && 517 - (!idev || !idev->cnf.disable_policy) && 516 + if (!READ_ONCE(net->ipv6.devconf_all->disable_policy) && 517 + (!idev || !READ_ONCE(idev->cnf.disable_policy)) && 518 518 !xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) { 519 519 __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); 520 520 goto drop; ··· 552 552 } 553 553 554 554 /* XXX: idev->cnf.proxy_ndp? */ 555 - if (net->ipv6.devconf_all->proxy_ndp && 555 + if (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) && 556 556 pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { 557 557 int proxied = ip6_forward_proxy_check(skb); 558 558 if (proxied > 0) {
+1 -1
net/ipv6/ipv6_sockglue.c
··· 1346 1346 } 1347 1347 1348 1348 if (val < 0) 1349 - val = sock_net(sk)->ipv6.devconf_all->hop_limit; 1349 + val = READ_ONCE(sock_net(sk)->ipv6.devconf_all->hop_limit); 1350 1350 break; 1351 1351 } 1352 1352
+7 -7
net/ipv6/mcast.c
··· 159 159 int iv; 160 160 161 161 if (mld_in_v1_mode(idev)) 162 - iv = idev->cnf.mldv1_unsolicited_report_interval; 162 + iv = READ_ONCE(idev->cnf.mldv1_unsolicited_report_interval); 163 163 else 164 - iv = idev->cnf.mldv2_unsolicited_report_interval; 164 + iv = READ_ONCE(idev->cnf.mldv2_unsolicited_report_interval); 165 165 166 166 return iv > 0 ? iv : 1; 167 167 } ··· 1202 1202 1203 1203 static int mld_force_mld_version(const struct inet6_dev *idev) 1204 1204 { 1205 + const struct net *net = dev_net(idev->dev); 1206 + int all_force; 1207 + 1208 + all_force = READ_ONCE(net->ipv6.devconf_all->force_mld_version); 1205 1209 /* Normally, both are 0 here. If enforcement to a particular is 1206 1210 * being used, individual device enforcement will have a lower 1207 1211 * precedence over 'all' device (.../conf/all/force_mld_version). 1208 1212 */ 1209 - 1210 - if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0) 1211 - return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version; 1212 - else 1213 - return idev->cnf.force_mld_version; 1213 + return all_force ?: READ_ONCE(idev->cnf.force_mld_version); 1214 1214 } 1215 1215 1216 1216 static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
+37 -32
net/ipv6/ndisc.c
··· 451 451 452 452 rcu_read_lock(); 453 453 idev = __in6_dev_get(skb->dev); 454 - tclass = idev ? idev->cnf.ndisc_tclass : 0; 454 + tclass = idev ? READ_ONCE(idev->cnf.ndisc_tclass) : 0; 455 455 rcu_read_unlock(); 456 456 457 457 skb_push(skb, sizeof(*hdr)); ··· 535 535 src_addr = solicited_addr; 536 536 if (ifp->flags & IFA_F_OPTIMISTIC) 537 537 override = false; 538 - inc_opt |= ifp->idev->cnf.force_tllao; 538 + inc_opt |= READ_ONCE(ifp->idev->cnf.force_tllao); 539 539 in6_ifa_put(ifp); 540 540 } else { 541 541 if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr, ··· 903 903 } 904 904 905 905 if (ipv6_chk_acast_addr(net, dev, &msg->target) || 906 - (idev->cnf.forwarding && 907 - (net->ipv6.devconf_all->proxy_ndp || idev->cnf.proxy_ndp) && 906 + (READ_ONCE(idev->cnf.forwarding) && 907 + (READ_ONCE(net->ipv6.devconf_all->proxy_ndp) || 908 + READ_ONCE(idev->cnf.proxy_ndp)) && 908 909 (is_router = pndisc_is_router(&msg->target, dev)) >= 0)) { 909 910 if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) && 910 911 skb->pkt_type != PACKET_HOST && ··· 930 929 } 931 930 932 931 if (is_router < 0) 933 - is_router = idev->cnf.forwarding; 932 + is_router = READ_ONCE(idev->cnf.forwarding); 934 933 935 934 if (dad) { 936 935 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &msg->target, ··· 974 973 { 975 974 struct inet6_dev *idev = __in6_dev_get(dev); 976 975 977 - switch (idev->cnf.accept_untracked_na) { 976 + switch (READ_ONCE(idev->cnf.accept_untracked_na)) { 978 977 case 0: /* Don't accept untracked na (absent in neighbor cache) */ 979 978 return 0; 980 979 case 1: /* Create new entries from na if currently untracked */ ··· 1025 1024 * drop_unsolicited_na takes precedence over accept_untracked_na 1026 1025 */ 1027 1026 if (!msg->icmph.icmp6_solicited && idev && 1028 - idev->cnf.drop_unsolicited_na) 1027 + READ_ONCE(idev->cnf.drop_unsolicited_na)) 1029 1028 return reason; 1030 1029 1031 1030 if (!ndisc_parse_options(dev, msg->opt, ndoptlen, &ndopts)) ··· 1081 1080 * Note that we 
don't do a (daddr == all-routers-mcast) check. 1082 1081 */ 1083 1082 new_state = msg->icmph.icmp6_solicited ? NUD_REACHABLE : NUD_STALE; 1084 - if (!neigh && lladdr && idev && idev->cnf.forwarding) { 1083 + if (!neigh && lladdr && idev && READ_ONCE(idev->cnf.forwarding)) { 1085 1084 if (accept_untracked_na(dev, saddr)) { 1086 1085 neigh = neigh_create(&nd_tbl, &msg->target, dev); 1087 1086 new_state = NUD_STALE; ··· 1101 1100 * has already sent a NA to us. 1102 1101 */ 1103 1102 if (lladdr && !memcmp(lladdr, dev->dev_addr, dev->addr_len) && 1104 - net->ipv6.devconf_all->forwarding && net->ipv6.devconf_all->proxy_ndp && 1103 + READ_ONCE(net->ipv6.devconf_all->forwarding) && 1104 + READ_ONCE(net->ipv6.devconf_all->proxy_ndp) && 1105 1105 pneigh_lookup(&nd_tbl, net, &msg->target, dev, 0)) { 1106 1106 /* XXX: idev->cnf.proxy_ndp */ 1107 1107 goto out; ··· 1150 1148 } 1151 1149 1152 1150 /* Don't accept RS if we're not in router mode */ 1153 - if (!idev->cnf.forwarding) 1151 + if (!READ_ONCE(idev->cnf.forwarding)) 1154 1152 goto out; 1155 1153 1156 1154 /* ··· 1320 1318 if (old_if_flags != in6_dev->if_flags) 1321 1319 send_ifinfo_notify = true; 1322 1320 1323 - if (!in6_dev->cnf.accept_ra_defrtr) { 1321 + if (!READ_ONCE(in6_dev->cnf.accept_ra_defrtr)) { 1324 1322 ND_PRINTK(2, info, 1325 1323 "RA: %s, defrtr is false for dev: %s\n", 1326 1324 __func__, skb->dev->name); ··· 1328 1326 } 1329 1327 1330 1328 lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime); 1331 - if (lifetime != 0 && lifetime < in6_dev->cnf.accept_ra_min_lft) { 1329 + if (lifetime != 0 && 1330 + lifetime < READ_ONCE(in6_dev->cnf.accept_ra_min_lft)) { 1332 1331 ND_PRINTK(2, info, 1333 1332 "RA: router lifetime (%ds) is too short: %s\n", 1334 1333 lifetime, skb->dev->name); ··· 1340 1337 * accept_ra_from_local is set to true. 
1341 1338 */ 1342 1339 net = dev_net(in6_dev->dev); 1343 - if (!in6_dev->cnf.accept_ra_from_local && 1340 + if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) && 1344 1341 ipv6_chk_addr(net, &ipv6_hdr(skb)->saddr, in6_dev->dev, 0)) { 1345 1342 ND_PRINTK(2, info, 1346 1343 "RA from local address detected on dev: %s: default router ignored\n", ··· 1352 1349 pref = ra_msg->icmph.icmp6_router_pref; 1353 1350 /* 10b is handled as if it were 00b (medium) */ 1354 1351 if (pref == ICMPV6_ROUTER_PREF_INVALID || 1355 - !in6_dev->cnf.accept_ra_rtr_pref) 1352 + !READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref)) 1356 1353 pref = ICMPV6_ROUTER_PREF_MEDIUM; 1357 1354 #endif 1358 1355 /* routes added from RAs do not use nexthop objects */ ··· 1423 1420 1424 1421 spin_unlock_bh(&table->tb6_lock); 1425 1422 } 1426 - if (in6_dev->cnf.accept_ra_min_hop_limit < 256 && 1423 + if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) < 256 && 1427 1424 ra_msg->icmph.icmp6_hop_limit) { 1428 - if (in6_dev->cnf.accept_ra_min_hop_limit <= ra_msg->icmph.icmp6_hop_limit) { 1429 - in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; 1425 + if (READ_ONCE(in6_dev->cnf.accept_ra_min_hop_limit) <= 1426 + ra_msg->icmph.icmp6_hop_limit) { 1427 + WRITE_ONCE(in6_dev->cnf.hop_limit, 1428 + ra_msg->icmph.icmp6_hop_limit); 1430 1429 fib6_metric_set(rt, RTAX_HOPLIMIT, 1431 1430 ra_msg->icmph.icmp6_hop_limit); 1432 1431 } else { ··· 1510 1505 } 1511 1506 1512 1507 #ifdef CONFIG_IPV6_ROUTE_INFO 1513 - if (!in6_dev->cnf.accept_ra_from_local && 1508 + if (!READ_ONCE(in6_dev->cnf.accept_ra_from_local) && 1514 1509 ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, 1515 1510 in6_dev->dev, 0)) { 1516 1511 ND_PRINTK(2, info, ··· 1519 1514 goto skip_routeinfo; 1520 1515 } 1521 1516 1522 - if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) { 1517 + if (READ_ONCE(in6_dev->cnf.accept_ra_rtr_pref) && ndopts.nd_opts_ri) { 1523 1518 struct nd_opt_hdr *p; 1524 1519 for (p = ndopts.nd_opts_ri; 1525 1520 p; ··· 1531 
1526 continue; 1532 1527 #endif 1533 1528 if (ri->prefix_len == 0 && 1534 - !in6_dev->cnf.accept_ra_defrtr) 1529 + !READ_ONCE(in6_dev->cnf.accept_ra_defrtr)) 1535 1530 continue; 1536 1531 if (ri->lifetime != 0 && 1537 - ntohl(ri->lifetime) < in6_dev->cnf.accept_ra_min_lft) 1532 + ntohl(ri->lifetime) < READ_ONCE(in6_dev->cnf.accept_ra_min_lft)) 1538 1533 continue; 1539 - if (ri->prefix_len < in6_dev->cnf.accept_ra_rt_info_min_plen) 1534 + if (ri->prefix_len < READ_ONCE(in6_dev->cnf.accept_ra_rt_info_min_plen)) 1540 1535 continue; 1541 - if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen) 1536 + if (ri->prefix_len > READ_ONCE(in6_dev->cnf.accept_ra_rt_info_max_plen)) 1542 1537 continue; 1543 1538 rt6_route_rcv(skb->dev, (u8 *)p, (p->nd_opt_len) << 3, 1544 1539 &ipv6_hdr(skb)->saddr); ··· 1558 1553 } 1559 1554 #endif 1560 1555 1561 - if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) { 1556 + if (READ_ONCE(in6_dev->cnf.accept_ra_pinfo) && ndopts.nd_opts_pi) { 1562 1557 struct nd_opt_hdr *p; 1563 1558 for (p = ndopts.nd_opts_pi; 1564 1559 p; ··· 1569 1564 } 1570 1565 } 1571 1566 1572 - if (ndopts.nd_opts_mtu && in6_dev->cnf.accept_ra_mtu) { 1567 + if (ndopts.nd_opts_mtu && READ_ONCE(in6_dev->cnf.accept_ra_mtu)) { 1573 1568 __be32 n; 1574 1569 u32 mtu; 1575 1570 ··· 1583 1578 1584 1579 if (mtu < IPV6_MIN_MTU || mtu > skb->dev->mtu) { 1585 1580 ND_PRINTK(2, warn, "RA: invalid mtu: %d\n", mtu); 1586 - } else if (in6_dev->cnf.mtu6 != mtu) { 1587 - in6_dev->cnf.mtu6 = mtu; 1581 + } else if (READ_ONCE(in6_dev->cnf.mtu6) != mtu) { 1582 + WRITE_ONCE(in6_dev->cnf.mtu6, mtu); 1588 1583 fib6_metric_set(rt, RTAX_MTU, mtu); 1589 1584 rt6_mtu_change(skb->dev, mtu); 1590 1585 } ··· 1818 1813 if (!idev) 1819 1814 return true; 1820 1815 if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED && 1821 - idev->cnf.suppress_frag_ndisc) { 1816 + READ_ONCE(idev->cnf.suppress_frag_ndisc)) { 1822 1817 net_warn_ratelimited("Received fragmented ndisc packet. 
Carefully consider disabling suppress_frag_ndisc.\n"); 1823 1818 return true; 1824 1819 } ··· 1895 1890 idev = in6_dev_get(dev); 1896 1891 if (!idev) 1897 1892 break; 1898 - if (idev->cnf.ndisc_notify || 1899 - net->ipv6.devconf_all->ndisc_notify) 1893 + if (READ_ONCE(idev->cnf.ndisc_notify) || 1894 + READ_ONCE(net->ipv6.devconf_all->ndisc_notify)) 1900 1895 ndisc_send_unsol_na(dev); 1901 1896 in6_dev_put(idev); 1902 1897 break; ··· 1905 1900 if (!idev) 1906 1901 evict_nocarrier = true; 1907 1902 else { 1908 - evict_nocarrier = idev->cnf.ndisc_evict_nocarrier && 1909 - net->ipv6.devconf_all->ndisc_evict_nocarrier; 1903 + evict_nocarrier = READ_ONCE(idev->cnf.ndisc_evict_nocarrier) && 1904 + READ_ONCE(net->ipv6.devconf_all->ndisc_evict_nocarrier); 1910 1905 in6_dev_put(idev); 1911 1906 } 1912 1907
+2 -2
net/ipv6/netfilter/nf_reject_ipv6.c
··· 83 83 84 84 skb_reserve(nskb, LL_MAX_HEADER); 85 85 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, 86 - net->ipv6.devconf_all->hop_limit); 86 + READ_ONCE(net->ipv6.devconf_all->hop_limit)); 87 87 nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen); 88 88 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); 89 89 ··· 124 124 125 125 skb_reserve(nskb, LL_MAX_HEADER); 126 126 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6, 127 - net->ipv6.devconf_all->hop_limit); 127 + READ_ONCE(net->ipv6.devconf_all->hop_limit)); 128 128 129 129 skb_reset_transport_header(nskb); 130 130 icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
+2 -2
net/ipv6/output_core.c
··· 111 111 rcu_read_lock(); 112 112 idev = __in6_dev_get(dev); 113 113 if (idev) 114 - hoplimit = idev->cnf.hop_limit; 114 + hoplimit = READ_ONCE(idev->cnf.hop_limit); 115 115 else 116 - hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit; 116 + hoplimit = READ_ONCE(dev_net(dev)->ipv6.devconf_all->hop_limit); 117 117 rcu_read_unlock(); 118 118 } 119 119 return hoplimit;
+11 -9
net/ipv6/route.c
··· 645 645 write_lock_bh(&neigh->lock); 646 646 if (!(neigh->nud_state & NUD_VALID) && 647 647 time_after(jiffies, 648 - neigh->updated + idev->cnf.rtr_probe_interval)) { 648 + neigh->updated + 649 + READ_ONCE(idev->cnf.rtr_probe_interval))) { 649 650 work = kmalloc(sizeof(*work), GFP_ATOMIC); 650 651 if (work) 651 652 __neigh_set_probe_once(neigh); 652 653 } 653 654 write_unlock_bh(&neigh->lock); 654 655 } else if (time_after(jiffies, last_probe + 655 - idev->cnf.rtr_probe_interval)) { 656 + READ_ONCE(idev->cnf.rtr_probe_interval))) { 656 657 work = kmalloc(sizeof(*work), GFP_ATOMIC); 657 658 } 658 659 ··· 1597 1596 1598 1597 rcu_read_lock(); 1599 1598 idev = __in6_dev_get(dev); 1600 - mtu = idev->cnf.mtu6; 1599 + mtu = READ_ONCE(idev->cnf.mtu6); 1601 1600 rcu_read_unlock(); 1602 1601 } 1603 1602 ··· 2221 2220 2222 2221 strict |= flags & RT6_LOOKUP_F_IFACE; 2223 2222 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE; 2224 - if (net->ipv6.devconf_all->forwarding == 0) 2223 + if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0) 2225 2224 strict |= RT6_LOOKUP_F_REACHABLE; 2226 2225 2227 2226 rcu_read_lock(); ··· 3250 3249 3251 3250 mtu = IPV6_MIN_MTU; 3252 3251 idev = __in6_dev_get(dev); 3253 - if (idev && idev->cnf.mtu6 > mtu) 3254 - mtu = idev->cnf.mtu6; 3252 + if (idev) 3253 + mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6)); 3255 3254 } 3256 3255 3257 3256 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); ··· 4150 4149 in6_dev = __in6_dev_get(skb->dev); 4151 4150 if (!in6_dev) 4152 4151 return; 4153 - if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects) 4152 + if (READ_ONCE(in6_dev->cnf.forwarding) || 4153 + !READ_ONCE(in6_dev->cnf.accept_redirects)) 4154 4154 return; 4155 4155 4156 4156 /* RFC2461 8.1: ··· 4585 4583 f6i->dst_nocount = true; 4586 4584 4587 4585 if (!anycast && 4588 - (net->ipv6.devconf_all->disable_policy || 4589 - idev->cnf.disable_policy)) 4586 + (READ_ONCE(net->ipv6.devconf_all->disable_policy) || 4587 + 
READ_ONCE(idev->cnf.disable_policy))) 4590 4588 f6i->dst_nopolicy = true; 4591 4589 } 4592 4590
+5 -3
net/ipv6/seg6_hmac.c
··· 241 241 struct sr6_tlv_hmac *tlv; 242 242 struct ipv6_sr_hdr *srh; 243 243 struct inet6_dev *idev; 244 + int require_hmac; 244 245 245 246 idev = __in6_dev_get(skb->dev); 246 247 ··· 249 248 250 249 tlv = seg6_get_tlv_hmac(srh); 251 250 251 + require_hmac = READ_ONCE(idev->cnf.seg6_require_hmac); 252 252 /* mandatory check but no tlv */ 253 - if (idev->cnf.seg6_require_hmac > 0 && !tlv) 253 + if (require_hmac > 0 && !tlv) 254 254 return false; 255 255 256 256 /* no check */ 257 - if (idev->cnf.seg6_require_hmac < 0) 257 + if (require_hmac < 0) 258 258 return true; 259 259 260 260 /* check only if present */ 261 - if (idev->cnf.seg6_require_hmac == 0 && !tlv) 261 + if (require_hmac == 0 && !tlv) 262 262 return true; 263 263 264 264 /* now, seg6_require_hmac >= 0 && tlv */
+1 -1
net/netfilter/nf_synproxy_core.c
··· 800 800 skb_reset_network_header(skb); 801 801 iph = skb_put(skb, sizeof(*iph)); 802 802 ip6_flow_hdr(iph, 0, 0); 803 - iph->hop_limit = net->ipv6.devconf_all->hop_limit; 803 + iph->hop_limit = READ_ONCE(net->ipv6.devconf_all->hop_limit); 804 804 iph->nexthdr = IPPROTO_TCP; 805 805 iph->saddr = *saddr; 806 806 iph->daddr = *daddr;