Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge in late fixes to prepare for the 6.11 net-next PR.

Conflicts:
93c3a96c301f ("net: pse-pd: Do not return EOPNOSUPP if config is null")
4cddb0f15ea9 ("net: ethtool: pse-pd: Fix possible null-deref")
30d7b6727724 ("net: ethtool: Add new power limit get and set features")
https://lore.kernel.org/20240715123204.623520bb@canb.auug.org.au/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+311 -68
+1 -1
drivers/net/netconsole.c
··· 973 973 /* rtnl_lock already held 974 974 * we might sleep in __netpoll_cleanup() 975 975 */ 976 + nt->enabled = false; 976 977 spin_unlock_irqrestore(&target_list_lock, flags); 977 978 978 979 __netpoll_cleanup(&nt->np); ··· 981 980 spin_lock_irqsave(&target_list_lock, flags); 982 981 netdev_put(nt->np.dev, &nt->np.dev_tracker); 983 982 nt->np.dev = NULL; 984 - nt->enabled = false; 985 983 stopped = true; 986 984 netconsole_target_put(nt); 987 985 goto restart;
+2 -2
drivers/net/pse-pd/pse_core.c
··· 832 832 { 833 833 int err = 0; 834 834 835 - if (pse_has_c33(psec)) { 835 + if (pse_has_c33(psec) && config->c33_admin_control) { 836 836 err = pse_ethtool_c33_set_config(psec, config); 837 837 if (err) 838 838 return err; 839 839 } 840 840 841 - if (pse_has_podl(psec)) 841 + if (pse_has_podl(psec) && config->podl_admin_control) 842 842 err = pse_ethtool_podl_set_config(psec, config); 843 843 844 844 return err;
+15 -7
include/net/ip6_route.h
··· 127 127 128 128 static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i, 129 129 const struct in6_addr *daddr, 130 - unsigned int prefs, 130 + unsigned int prefs, int l3mdev_index, 131 131 struct in6_addr *saddr) 132 132 { 133 + struct net_device *l3mdev; 134 + struct net_device *dev; 135 + bool same_vrf; 133 136 int err = 0; 134 137 135 - if (f6i && f6i->fib6_prefsrc.plen) { 136 - *saddr = f6i->fib6_prefsrc.addr; 137 - } else { 138 - struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL; 138 + rcu_read_lock(); 139 139 140 - err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr); 141 - } 140 + l3mdev = dev_get_by_index_rcu(net, l3mdev_index); 141 + if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev) 142 + dev = f6i ? fib6_info_nh_dev(f6i) : NULL; 143 + same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev; 144 + if (f6i && f6i->fib6_prefsrc.plen && same_vrf) 145 + *saddr = f6i->fib6_prefsrc.addr; 146 + else 147 + err = ipv6_dev_get_saddr(net, same_vrf ? dev : l3mdev, daddr, prefs, saddr); 148 + 149 + rcu_read_unlock(); 142 150 143 151 return err; 144 152 }
+10 -26
include/net/xfrm.h
··· 178 178 struct hlist_node gclist; 179 179 struct hlist_node bydst; 180 180 }; 181 - struct hlist_node bysrc; 181 + union { 182 + struct hlist_node dev_gclist; 183 + struct hlist_node bysrc; 184 + }; 182 185 struct hlist_node byspi; 183 186 struct hlist_node byseq; 184 187 ··· 1595 1592 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) 1596 1593 { 1597 1594 struct xfrm_dev_offload *xdo = &x->xso; 1598 - struct net_device *dev = xdo->dev; 1595 + struct net_device *dev = READ_ONCE(xdo->dev); 1599 1596 1600 1597 if (dev && dev->xfrmdev_ops && 1601 1598 dev->xfrmdev_ops->xdo_dev_state_update_stats) ··· 1953 1950 struct xfrm_user_offload *xuo, u8 dir, 1954 1951 struct netlink_ext_ack *extack); 1955 1952 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); 1953 + void xfrm_dev_state_delete(struct xfrm_state *x); 1954 + void xfrm_dev_state_free(struct xfrm_state *x); 1956 1955 1957 1956 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x) 1958 1957 { 1959 1958 struct xfrm_dev_offload *xso = &x->xso; 1960 + struct net_device *dev = READ_ONCE(xso->dev); 1960 1961 1962 - if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn) 1962 - xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); 1961 + if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn) 1962 + dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); 1963 1963 } 1964 1964 1965 1965 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) ··· 1981 1975 return true; 1982 1976 1983 1977 return false; 1984 - } 1985 - 1986 - static inline void xfrm_dev_state_delete(struct xfrm_state *x) 1987 - { 1988 - struct xfrm_dev_offload *xso = &x->xso; 1989 - 1990 - if (xso->dev) 1991 - xso->dev->xfrmdev_ops->xdo_dev_state_delete(x); 1992 - } 1993 - 1994 - static inline void xfrm_dev_state_free(struct xfrm_state *x) 1995 - { 1996 - struct xfrm_dev_offload *xso = &x->xso; 1997 - struct net_device *dev = xso->dev; 1998 - 1999 - if (dev && dev->xfrmdev_ops) { 2000 - if (dev->xfrmdev_ops->xdo_dev_state_free) 2001 - dev->xfrmdev_ops->xdo_dev_state_free(x); 2002 - xso->dev = NULL; 2003 - xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; 2004 - netdev_put(dev, &xso->dev_tracker); 2005 - } 2006 1978 } 2007 1979 2008 1980 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
+2 -2
net/bridge/br_forward.c
··· 25 25 26 26 vg = nbp_vlan_group_rcu(p); 27 27 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && 28 - p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) && 29 - nbp_switchdev_allowed_egress(p, skb) && 28 + (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) && 29 + br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) && 30 30 !br_skb_isolated(p, skb); 31 31 } 32 32
+1 -3
net/core/xdp.c
··· 127 127 return; 128 128 129 129 if (type == MEM_TYPE_PAGE_POOL) { 130 - rcu_read_lock(); 131 - xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params); 130 + xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params); 132 131 page_pool_destroy(xa->page_pool); 133 - rcu_read_unlock(); 134 132 } 135 133 } 136 134 EXPORT_SYMBOL_GPL(xdp_unreg_mem_model);
+6 -2
net/ethtool/pse-pd.c
··· 277 277 tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) { 278 278 struct pse_control_config config = {}; 279 279 280 - if (pse_has_podl(phydev->psec)) 280 + if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]) 281 281 config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]); 282 - if (pse_has_c33(phydev->psec)) 282 + if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) 283 283 config.c33_admin_control = nla_get_u32(tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]); 284 284 285 + /* pse_ethtool_set_config() will do nothing if the config 286 + * is zero 287 + */ 285 288 ret = pse_ethtool_set_config(phydev->psec, info->extack, 286 289 &config); 287 290 if (ret) 288 291 return ret; 289 292 } 290 293 294 + /* Return errno or zero - PSE has no notification */ 291 295 return ret; 292 296 } 293 297
+1 -2
net/ipv4/esp4.c
··· 239 239 #else 240 240 static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) 241 241 { 242 - kfree_skb(skb); 243 - 242 + WARN_ON(1); 244 243 return -EOPNOTSUPP; 245 244 } 246 245 #endif
+7
net/ipv4/esp4_offload.c
··· 56 56 x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, 57 57 (xfrm_address_t *)&ip_hdr(skb)->daddr, 58 58 spi, IPPROTO_ESP, AF_INET); 59 + 60 + if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) { 61 + /* non-offload path will record the error and audit log */ 62 + xfrm_state_put(x); 63 + x = NULL; 64 + } 65 + 59 66 if (!x) 60 67 goto out_reset; 61 68
+11 -2
net/ipv4/fib_semantics.c
··· 2269 2269 fib_select_default(fl4, res); 2270 2270 2271 2271 check_saddr: 2272 - if (!fl4->saddr) 2273 - fl4->saddr = fib_result_prefsrc(net, res); 2272 + if (!fl4->saddr) { 2273 + struct net_device *l3mdev; 2274 + 2275 + l3mdev = dev_get_by_index_rcu(net, fl4->flowi4_l3mdev); 2276 + 2277 + if (!l3mdev || 2278 + l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) == l3mdev) 2279 + fl4->saddr = fib_result_prefsrc(net, res); 2280 + else 2281 + fl4->saddr = inet_select_addr(l3mdev, 0, RT_SCOPE_LINK); 2282 + } 2274 2283 }
+2 -1
net/ipv6/addrconf.c
··· 1873 1873 master, &dst, 1874 1874 scores, hiscore_idx); 1875 1875 1876 - if (scores[hiscore_idx].ifa) 1876 + if (scores[hiscore_idx].ifa && 1877 + scores[hiscore_idx].scopedist >= 0) 1877 1878 goto out; 1878 1879 } 1879 1880
+1 -2
net/ipv6/esp6.c
··· 256 256 #else 257 257 static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) 258 258 { 259 - kfree_skb(skb); 260 - 259 + WARN_ON(1); 261 260 return -EOPNOTSUPP; 262 261 } 263 262 #endif
+7
net/ipv6/esp6_offload.c
··· 83 83 x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, 84 84 (xfrm_address_t *)&ipv6_hdr(skb)->daddr, 85 85 spi, IPPROTO_ESP, AF_INET6); 86 + 87 + if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) { 88 + /* non-offload path will record the error and audit log */ 89 + xfrm_state_put(x); 90 + x = NULL; 91 + } 92 + 86 93 if (!x) 87 94 goto out_reset; 88 95
+1
net/ipv6/ip6_output.c
··· 1124 1124 from = rt ? rcu_dereference(rt->from) : NULL; 1125 1125 err = ip6_route_get_saddr(net, from, &fl6->daddr, 1126 1126 sk ? READ_ONCE(inet6_sk(sk)->srcprefs) : 0, 1127 + fl6->flowi6_l3mdev, 1127 1128 &fl6->saddr); 1128 1129 rcu_read_unlock(); 1129 1130
+1 -1
net/ipv6/route.c
··· 5687 5687 goto nla_put_failure; 5688 5688 } else if (dest) { 5689 5689 struct in6_addr saddr_buf; 5690 - if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 && 5690 + if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 && 5691 5691 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) 5692 5692 goto nla_put_failure; 5693 5693 }
+84 -2
net/packet/af_packet.c
··· 538 538 return packet_lookup_frame(po, rb, rb->head, status); 539 539 } 540 540 541 + static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev) 542 + { 543 + u8 *skb_orig_data = skb->data; 544 + int skb_orig_len = skb->len; 545 + struct vlan_hdr vhdr, *vh; 546 + unsigned int header_len; 547 + 548 + if (!dev) 549 + return 0; 550 + 551 + /* In the SOCK_DGRAM scenario, skb data starts at the network 552 + * protocol, which is after the VLAN headers. The outer VLAN 553 + * header is at the hard_header_len offset in non-variable 554 + * length link layer headers. If it's a VLAN device, the 555 + * min_header_len should be used to exclude the VLAN header 556 + * size. 557 + */ 558 + if (dev->min_header_len == dev->hard_header_len) 559 + header_len = dev->hard_header_len; 560 + else if (is_vlan_dev(dev)) 561 + header_len = dev->min_header_len; 562 + else 563 + return 0; 564 + 565 + skb_push(skb, skb->data - skb_mac_header(skb)); 566 + vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr); 567 + if (skb_orig_data != skb->data) { 568 + skb->data = skb_orig_data; 569 + skb->len = skb_orig_len; 570 + } 571 + if (unlikely(!vh)) 572 + return 0; 573 + 574 + return ntohs(vh->h_vlan_TCI); 575 + } 576 + 577 + static __be16 vlan_get_protocol_dgram(struct sk_buff *skb) 578 + { 579 + __be16 proto = skb->protocol; 580 + 581 + if (unlikely(eth_type_vlan(proto))) { 582 + u8 *skb_orig_data = skb->data; 583 + int skb_orig_len = skb->len; 584 + 585 + skb_push(skb, skb->data - skb_mac_header(skb)); 586 + proto = __vlan_get_protocol(skb, proto, NULL); 587 + if (skb_orig_data != skb->data) { 588 + skb->data = skb_orig_data; 589 + skb->len = skb_orig_len; 590 + } 591 + } 592 + 593 + return proto; 594 + } 595 + 541 596 static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) 542 597 { 543 598 del_timer_sync(&pkc->retire_blk_timer); ··· 1062 1007 static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, 1063 1008 struct tpacket3_hdr *ppd) 1064 1009 { 1010 + struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc); 1011 + 1065 1012 if (skb_vlan_tag_present(pkc->skb)) { 1066 1013 ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); 1067 1014 ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); 1015 + ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 1016 + } else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) { 1017 + ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev); 1018 + ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol); 1068 1019 ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 1069 1020 } else { 1070 1021 ppd->hv1.tp_vlan_tci = 0; ··· 2488 2427 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); 2489 2428 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); 2490 2429 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2430 + } else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { 2431 + h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev); 2432 + h.h2->tp_vlan_tpid = ntohs(skb->protocol); 2433 + status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 2491 2434 } else { 2492 2435 h.h2->tp_vlan_tci = 0; 2493 2436 h.h2->tp_vlan_tpid = 0; ··· 2521 2456 sll->sll_halen = dev_parse_header(skb, sll->sll_addr); 2522 2457 sll->sll_family = AF_PACKET; 2523 2458 sll->sll_hatype = dev->type; 2524 - sll->sll_protocol = skb->protocol; 2459 + sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ? 2460 + vlan_get_protocol_dgram(skb) : skb->protocol; 2525 2461 sll->sll_pkttype = skb->pkt_type; 2526 2462 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) 2527 2463 sll->sll_ifindex = orig_dev->ifindex; ··· 3547 3481 /* Original length was stored in sockaddr_ll fields */ 3548 3482 origlen = PACKET_SKB_CB(skb)->sa.origlen; 3549 3483 sll->sll_family = AF_PACKET; 3550 - sll->sll_protocol = (sock->type == SOCK_DGRAM) ? 3484 + vlan_get_protocol_dgram(skb) : skb->protocol; 3551 3486 } 3552 3487 3553 3488 sock_recv_cmsgs(msg, sk, skb); ··· 3605 3538 aux.tp_vlan_tci = skb_vlan_tag_get(skb); 3606 3539 aux.tp_vlan_tpid = ntohs(skb->vlan_proto); 3607 3540 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3541 + } else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { 3542 + struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; 3543 + struct net_device *dev; 3544 + 3545 + rcu_read_lock(); 3546 + dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex); 3547 + if (dev) { 3548 + aux.tp_vlan_tci = vlan_get_tci(skb, dev); 3549 + aux.tp_vlan_tpid = ntohs(skb->protocol); 3550 + aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; 3551 + } else { 3552 + aux.tp_vlan_tci = 0; 3553 + aux.tp_vlan_tpid = 0; 3554 + } 3555 + rcu_read_unlock(); 3608 3556 } else { 3609 3557 aux.tp_vlan_tci = 0; 3610 3558 aux.tp_vlan_tpid = 0;
+3 -5
net/xfrm/xfrm_input.c
··· 475 475 encap_type == UDP_ENCAP_ESPINUDP))) { 476 476 x = xfrm_input_state(skb); 477 477 478 - if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) { 479 - XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR); 480 - goto drop; 481 - } 482 - 483 478 if (unlikely(x->km.state != XFRM_STATE_VALID)) { 484 479 if (x->km.state == XFRM_STATE_ACQ) 485 480 XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); ··· 581 586 } 582 587 583 588 if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) { 589 + secpath_reset(skb); 584 590 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR); 591 + xfrm_audit_state_notfound(skb, family, spi, seq); 585 592 xfrm_state_put(x); 593 + x = NULL; 586 594 goto drop; 587 595 } 588 596
+2 -3
net/xfrm/xfrm_policy.c
··· 452 452 453 453 static void xfrm_policy_kill(struct xfrm_policy *policy) 454 454 { 455 + xfrm_dev_policy_delete(policy); 456 + 455 457 write_lock_bh(&policy->lock); 456 458 policy->walk.dead = 1; 457 459 write_unlock_bh(&policy->lock); ··· 1852 1850 1853 1851 __xfrm_policy_unlink(pol, dir); 1854 1852 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 1855 - xfrm_dev_policy_delete(pol); 1856 1853 cnt++; 1857 1854 xfrm_audit_policy_delete(pol, 1, task_valid); 1858 1855 xfrm_policy_kill(pol); ··· 1892 1891 1893 1892 __xfrm_policy_unlink(pol, dir); 1894 1893 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 1895 - xfrm_dev_policy_delete(pol); 1896 1894 cnt++; 1897 1895 xfrm_audit_policy_delete(pol, 1, task_valid); 1898 1896 xfrm_policy_kill(pol); ··· 2342 2342 pol = __xfrm_policy_unlink(pol, dir); 2343 2343 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2344 2344 if (pol) { 2345 - xfrm_dev_policy_delete(pol); 2346 2345 xfrm_policy_kill(pol); 2347 2346 return 0; 2348 2347 }
+61 -4
net/xfrm/xfrm_state.c
··· 49 49 50 50 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task); 51 51 static HLIST_HEAD(xfrm_state_gc_list); 52 + static HLIST_HEAD(xfrm_state_dev_gc_list); 52 53 53 54 static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x) 54 55 { ··· 215 214 static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO]; 216 215 217 216 static DEFINE_SPINLOCK(xfrm_state_gc_lock); 217 + static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock); 218 218 219 219 int __xfrm_state_delete(struct xfrm_state *x); 220 220 ··· 685 683 } 686 684 EXPORT_SYMBOL(xfrm_state_alloc); 687 685 686 + #ifdef CONFIG_XFRM_OFFLOAD 687 + void xfrm_dev_state_delete(struct xfrm_state *x) 688 + { 689 + struct xfrm_dev_offload *xso = &x->xso; 690 + struct net_device *dev = READ_ONCE(xso->dev); 691 + 692 + if (dev) { 693 + dev->xfrmdev_ops->xdo_dev_state_delete(x); 694 + spin_lock_bh(&xfrm_state_dev_gc_lock); 695 + hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list); 696 + spin_unlock_bh(&xfrm_state_dev_gc_lock); 697 + } 698 + } 699 + EXPORT_SYMBOL_GPL(xfrm_dev_state_delete); 700 + 701 + void xfrm_dev_state_free(struct xfrm_state *x) 702 + { 703 + struct xfrm_dev_offload *xso = &x->xso; 704 + struct net_device *dev = READ_ONCE(xso->dev); 705 + 706 + if (dev && dev->xfrmdev_ops) { 707 + spin_lock_bh(&xfrm_state_dev_gc_lock); 708 + if (!hlist_unhashed(&x->dev_gclist)) 709 + hlist_del(&x->dev_gclist); 710 + spin_unlock_bh(&xfrm_state_dev_gc_lock); 711 + 712 + if (dev->xfrmdev_ops->xdo_dev_state_free) 713 + dev->xfrmdev_ops->xdo_dev_state_free(x); 714 + WRITE_ONCE(xso->dev, NULL); 715 + xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; 716 + netdev_put(dev, &xso->dev_tracker); 717 + } 718 + } 719 + #endif 720 + 688 721 void __xfrm_state_destroy(struct xfrm_state *x, bool sync) 689 722 { 690 723 WARN_ON(x->km.state != XFRM_STATE_DEAD); ··· 886 849 887 850 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid) 888 851 { 852 + struct xfrm_state *x; 853 + struct hlist_node *tmp; 854 + struct xfrm_dev_offload *xso; 889 855 int i, err = 0, cnt = 0; 890 856 891 857 spin_lock_bh(&net->xfrm.xfrm_state_lock); ··· 898 858 899 859 err = -ESRCH; 900 860 for (i = 0; i <= net->xfrm.state_hmask; i++) { 901 - struct xfrm_state *x; 902 - struct xfrm_dev_offload *xso; 903 861 restart: 904 862 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { 905 863 xso = &x->xso; ··· 907 869 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 908 870 909 871 err = xfrm_state_delete(x); 872 + xfrm_dev_state_free(x); 873 + 910 874 xfrm_audit_state_delete(x, err ? 0 : 1, 911 875 task_valid); 912 876 xfrm_state_put(x); ··· 925 885 926 886 out: 927 887 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 888 + 889 + spin_lock_bh(&xfrm_state_dev_gc_lock); 890 + restart_gc: 891 + hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) { 892 + xso = &x->xso; 893 + 894 + if (xso->dev == dev) { 895 + spin_unlock_bh(&xfrm_state_dev_gc_lock); 896 + xfrm_dev_state_free(x); 897 + spin_lock_bh(&xfrm_state_dev_gc_lock); 898 + goto restart_gc; 899 + } 900 + 901 + } 902 + spin_unlock_bh(&xfrm_state_dev_gc_lock); 903 + 904 + xfrm_flush_gc(); 905 + 928 906 return err; 929 907 } 930 908 EXPORT_SYMBOL(xfrm_dev_state_flush); ··· 1332 1274 xso->dev = xdo->dev; 1333 1275 xso->real_dev = xdo->real_dev; 1334 1276 xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ; 1335 - netdev_tracker_alloc(xso->dev, &xso->dev_tracker, 1336 - GFP_ATOMIC); 1277 + netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC); 1337 1278 error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL); 1338 1279 if (error) { 1339 1280 xso->dir = 0;
-1
net/xfrm/xfrm_user.c
··· 2466 2466 NETLINK_CB(skb).portid); 2467 2467 } 2468 2468 } else { 2469 - xfrm_dev_policy_delete(xp); 2470 2469 xfrm_audit_policy_delete(xp, err ? 0 : 1, true); 2471 2470 2472 2471 if (err != 0)
+91 -2
tools/testing/selftests/net/vrf_route_leaking.sh
··· 59 59 # while it is forwarded between different vrfs. 60 60 61 61 source lib.sh 62 + PATH=$PWD:$PWD/tools/testing/selftests/net:$PATH 62 63 VERBOSE=0 63 64 PAUSE_ON_FAIL=no 64 65 DEFAULT_TTYPE=sym ··· 534 533 ipv6_ping_frag asym 535 534 } 536 535 536 + ipv4_ping_local() 537 + { 538 + log_section "IPv4 (sym route): VRF ICMP local error route lookup ping" 539 + 540 + setup_sym 541 + 542 + check_connectivity || return 543 + 544 + run_cmd ip netns exec $r1 ip vrf exec blue ping -c1 -w1 ${H2_N2_IP} 545 + log_test $? 0 "VRF ICMP local IPv4" 546 + } 547 + 548 + ipv4_tcp_local() 549 + { 550 + log_section "IPv4 (sym route): VRF tcp local connection" 551 + 552 + setup_sym 553 + 554 + check_connectivity || return 555 + 556 + run_cmd nettest -s -O "$h2" -l ${H2_N2_IP} -I eth0 -3 eth0 & 557 + sleep 1 558 + run_cmd nettest -N "$r1" -d blue -r ${H2_N2_IP} 559 + log_test $? 0 "VRF tcp local connection IPv4" 560 + } 561 + 562 + ipv4_udp_local() 563 + { 564 + log_section "IPv4 (sym route): VRF udp local connection" 565 + 566 + setup_sym 567 + 568 + check_connectivity || return 569 + 570 + run_cmd nettest -s -D -O "$h2" -l ${H2_N2_IP} -I eth0 -3 eth0 & 571 + sleep 1 572 + run_cmd nettest -D -N "$r1" -d blue -r ${H2_N2_IP} 573 + log_test $? 0 "VRF udp local connection IPv4" 574 + } 575 + 576 + ipv6_ping_local() 577 + { 578 + log_section "IPv6 (sym route): VRF ICMP local error route lookup ping" 579 + 580 + setup_sym 581 + 582 + check_connectivity6 || return 583 + 584 + run_cmd ip netns exec $r1 ip vrf exec blue ${ping6} -c1 -w1 ${H2_N2_IP6} 585 + log_test $? 0 "VRF ICMP local IPv6" 586 + } 587 + 588 + ipv6_tcp_local() 589 + { 590 + log_section "IPv6 (sym route): VRF tcp local connection" 591 + 592 + setup_sym 593 + 594 + check_connectivity6 || return 595 + 596 + run_cmd nettest -s -6 -O "$h2" -l ${H2_N2_IP6} -I eth0 -3 eth0 & 597 + sleep 1 598 + run_cmd nettest -6 -N "$r1" -d blue -r ${H2_N2_IP6} 599 + log_test $? 0 "VRF tcp local connection IPv6" 600 + } 601 + 602 + ipv6_udp_local() 603 + { 604 + log_section "IPv6 (sym route): VRF udp local connection" 605 + 606 + setup_sym 607 + 608 + check_connectivity6 || return 609 + 610 + run_cmd nettest -s -6 -D -O "$h2" -l ${H2_N2_IP6} -I eth0 -3 eth0 & 611 + sleep 1 612 + run_cmd nettest -6 -D -N "$r1" -d blue -r ${H2_N2_IP6} 613 + log_test $? 0 "VRF udp local connection IPv6" 614 + } 615 + 537 616 ################################################################################ 538 617 # usage ··· 636 555 # Some systems don't have a ping6 binary anymore 637 556 command -v ping6 > /dev/null 2>&1 && ping6=$(command -v ping6) || ping6=$(command -v ping) 638 557 639 - TESTS_IPV4="ipv4_ping_ttl ipv4_traceroute ipv4_ping_frag ipv4_ping_ttl_asym ipv4_traceroute_asym" 640 - TESTS_IPV6="ipv6_ping_ttl ipv6_traceroute ipv6_ping_ttl_asym ipv6_traceroute_asym" 558 + TESTS_IPV4="ipv4_ping_ttl ipv4_traceroute ipv4_ping_frag ipv4_ping_local ipv4_tcp_local 559 + ipv4_udp_local ipv4_ping_ttl_asym ipv4_traceroute_asym" 560 + TESTS_IPV6="ipv6_ping_ttl ipv6_traceroute ipv6_ping_local ipv6_tcp_local ipv6_udp_local 561 + ipv6_ping_ttl_asym ipv6_traceroute_asym" 641 562 642 563 ret=0 643 564 nsuccess=0 ··· 677 594 ipv4_traceroute|traceroute) ipv4_traceroute;;& 678 595 ipv4_traceroute_asym|traceroute) ipv4_traceroute_asym;;& 679 596 ipv4_ping_frag|ping) ipv4_ping_frag;;& 597 + ipv4_ping_local|ping) ipv4_ping_local;;& 598 + ipv4_tcp_local) ipv4_tcp_local;;& 599 + ipv4_udp_local) ipv4_udp_local;;& 680 600 681 601 ipv6_ping_ttl|ping) ipv6_ping_ttl;;& 682 602 ipv6_ping_ttl_asym|ping) ipv6_ping_ttl_asym;;& 683 603 ipv6_traceroute|traceroute) ipv6_traceroute;;& 684 604 ipv6_traceroute_asym|traceroute) ipv6_traceroute_asym;;& 685 605 ipv6_ping_frag|ping) ipv6_ping_frag;;& 606 + ipv6_ping_local|ping) ipv6_ping_local;;& 607 + ipv6_tcp_local) ipv6_tcp_local;;& 608 + ipv6_udp_local) ipv6_udp_local;;& 686 609 687 610 # setup namespaces and config, but do not run any tests 688 611 setup_sym|setup) setup_sym; exit 0;;