Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ipsec-next-2025-03-24' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next

Steffen Klassert says:

====================
pull request (net-next): ipsec-next 2025-03-24

1) Prevent setting high order sequence number bits on input in
non-ESN mode. From Leon Romanovsky.

2) Support PMTU handling in tunnel mode for packet offload.
From Leon Romanovsky.

3) Make xfrm_state_lookup_byaddr lockless.
From Florian Westphal.

4) Remove unnecessary NULL check in xfrm_lookup_with_ifid().
From Dan Carpenter.

* tag 'ipsec-next-2025-03-24' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next:
xfrm: Remove unnecessary NULL check in xfrm_lookup_with_ifid()
xfrm: state: make xfrm_state_lookup_byaddr lockless
xfrm: check for PMTU in tunnel mode for packet offload
xfrm: provide common xdo_dev_offload_ok callback implementation
xfrm: rely on XFRM offload
xfrm: simplify SA initialization routine
xfrm: delay initialization of offload path till its actually requested
xfrm: prevent high SEQ input in non-ESN mode
====================

Link: https://patch.msgid.link/20250324061855.4116819-1-steffen.klassert@secunet.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+102 -193
+2 -1
Documentation/networking/xfrm_device.rst
··· 126 126 the skb and the intended offload state to ask the driver if the offload 127 127 will be serviceable. This can check the packet information to be sure the 128 128 offload can be supported (e.g. IPv4 or IPv6, no IPv4 options, etc) and 129 - return true of false to signify its support. 129 + return true or false to signify its support. In case driver doesn't implement 130 + this callback, the stack provides reasonable defaults. 130 131 131 132 Crypto offload mode: 132 133 When ready to send, the driver needs to inspect the Tx packet for the
+5 -11
drivers/net/bonding/bond_main.c
··· 674 674 static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) 675 675 { 676 676 struct net_device *real_dev; 677 - bool ok = false; 678 677 679 678 rcu_read_lock(); 680 679 real_dev = bond_ipsec_dev(xs); 681 - if (!real_dev) 682 - goto out; 680 + if (!real_dev || netif_is_bond_master(real_dev)) { 681 + rcu_read_unlock(); 682 + return false; 683 + } 683 684 684 - if (!real_dev->xfrmdev_ops || 685 - !real_dev->xfrmdev_ops->xdo_dev_offload_ok || 686 - netif_is_bond_master(real_dev)) 687 - goto out; 688 - 689 - ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); 690 - out: 691 685 rcu_read_unlock(); 692 - return ok; 686 + return true; 693 687 } 694 688 695 689 /**
-21
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 6538 6538 mutex_unlock(&uld_mutex); 6539 6539 } 6540 6540 6541 - static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) 6542 - { 6543 - struct adapter *adap = netdev2adap(x->xso.dev); 6544 - bool ret = false; 6545 - 6546 - if (!mutex_trylock(&uld_mutex)) { 6547 - dev_dbg(adap->pdev_dev, 6548 - "crypto uld critical resource is under use\n"); 6549 - return ret; 6550 - } 6551 - if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS)) 6552 - goto out_unlock; 6553 - 6554 - ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x); 6555 - 6556 - out_unlock: 6557 - mutex_unlock(&uld_mutex); 6558 - return ret; 6559 - } 6560 - 6561 6541 static void cxgb4_advance_esn_state(struct xfrm_state *x) 6562 6542 { 6563 6543 struct adapter *adap = netdev2adap(x->xso.dev); ··· 6563 6583 .xdo_dev_state_add = cxgb4_xfrm_add_state, 6564 6584 .xdo_dev_state_delete = cxgb4_xfrm_del_state, 6565 6585 .xdo_dev_state_free = cxgb4_xfrm_free_state, 6566 - .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok, 6567 6586 .xdo_dev_state_advance_esn = cxgb4_advance_esn_state, 6568 6587 }; 6569 6588
-16
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
··· 71 71 static LIST_HEAD(uld_ctx_list); 72 72 static DEFINE_MUTEX(dev_mutex); 73 73 74 - static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x); 75 74 static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state); 76 75 static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev); 77 76 static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop); ··· 84 85 .xdo_dev_state_add = ch_ipsec_xfrm_add_state, 85 86 .xdo_dev_state_delete = ch_ipsec_xfrm_del_state, 86 87 .xdo_dev_state_free = ch_ipsec_xfrm_free_state, 87 - .xdo_dev_offload_ok = ch_ipsec_offload_ok, 88 88 .xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state, 89 89 }; 90 90 ··· 319 321 sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle; 320 322 kfree(sa_entry); 321 323 module_put(THIS_MODULE); 322 - } 323 - 324 - static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) 325 - { 326 - if (x->props.family == AF_INET) { 327 - /* Offload with IP options is not supported yet */ 328 - if (ip_hdr(skb)->ihl > 5) 329 - return false; 330 - } else { 331 - /* Offload with IPv6 extension headers is not support yet */ 332 - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) 333 - return false; 334 - } 335 - return true; 336 324 } 337 325 338 326 static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
-21
drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
··· 817 817 } 818 818 } 819 819 820 - /** 821 - * ixgbe_ipsec_offload_ok - can this packet use the xfrm hw offload 822 - * @skb: current data packet 823 - * @xs: pointer to transformer state struct 824 - **/ 825 - static bool ixgbe_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) 826 - { 827 - if (xs->props.family == AF_INET) { 828 - /* Offload with IPv4 options is not supported yet */ 829 - if (ip_hdr(skb)->ihl != 5) 830 - return false; 831 - } else { 832 - /* Offload with IPv6 extension headers is not support yet */ 833 - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) 834 - return false; 835 - } 836 - 837 - return true; 838 - } 839 - 840 820 static const struct xfrmdev_ops ixgbe_xfrmdev_ops = { 841 821 .xdo_dev_state_add = ixgbe_ipsec_add_sa, 842 822 .xdo_dev_state_delete = ixgbe_ipsec_del_sa, 843 - .xdo_dev_offload_ok = ixgbe_ipsec_offload_ok, 844 823 }; 845 824 846 825 /**
-21
drivers/net/ethernet/intel/ixgbevf/ipsec.c
··· 428 428 } 429 429 } 430 430 431 - /** 432 - * ixgbevf_ipsec_offload_ok - can this packet use the xfrm hw offload 433 - * @skb: current data packet 434 - * @xs: pointer to transformer state struct 435 - **/ 436 - static bool ixgbevf_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) 437 - { 438 - if (xs->props.family == AF_INET) { 439 - /* Offload with IPv4 options is not supported yet */ 440 - if (ip_hdr(skb)->ihl != 5) 441 - return false; 442 - } else { 443 - /* Offload with IPv6 extension headers is not support yet */ 444 - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) 445 - return false; 446 - } 447 - 448 - return true; 449 - } 450 - 451 431 static const struct xfrmdev_ops ixgbevf_xfrmdev_ops = { 452 432 .xdo_dev_state_add = ixgbevf_ipsec_add_sa, 453 433 .xdo_dev_state_delete = ixgbevf_ipsec_del_sa, 454 - .xdo_dev_offload_ok = ixgbevf_ipsec_offload_ok, 455 434 }; 456 435 457 436 /**
-15
drivers/net/ethernet/marvell/octeontx2/nic/cn10k_ipsec.c
··· 744 744 queue_work(pf->ipsec.sa_workq, &pf->ipsec.sa_work); 745 745 } 746 746 747 - static bool cn10k_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) 748 - { 749 - if (x->props.family == AF_INET) { 750 - /* Offload with IPv4 options is not supported yet */ 751 - if (ip_hdr(skb)->ihl > 5) 752 - return false; 753 - } else { 754 - /* Offload with IPv6 extension headers is not support yet */ 755 - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) 756 - return false; 757 - } 758 - return true; 759 - } 760 - 761 747 static const struct xfrmdev_ops cn10k_ipsec_xfrmdev_ops = { 762 748 .xdo_dev_state_add = cn10k_ipsec_add_state, 763 749 .xdo_dev_state_delete = cn10k_ipsec_del_state, 764 - .xdo_dev_offload_ok = cn10k_ipsec_offload_ok, 765 750 }; 766 751 767 752 static void cn10k_ipsec_sa_wq_handler(struct work_struct *work)
-16
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
··· 966 966 priv->ipsec = NULL; 967 967 } 968 968 969 - static bool mlx5e_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) 970 - { 971 - if (x->props.family == AF_INET) { 972 - /* Offload with IPv4 options is not supported yet */ 973 - if (ip_hdr(skb)->ihl > 5) 974 - return false; 975 - } else { 976 - /* Offload with IPv6 extension headers is not support yet */ 977 - if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) 978 - return false; 979 - } 980 - 981 - return true; 982 - } 983 - 984 969 static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x) 985 970 { 986 971 struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); ··· 1232 1247 .xdo_dev_state_add = mlx5e_xfrm_add_state, 1233 1248 .xdo_dev_state_delete = mlx5e_xfrm_del_state, 1234 1249 .xdo_dev_state_free = mlx5e_xfrm_free_state, 1235 - .xdo_dev_offload_ok = mlx5e_ipsec_offload_ok, 1236 1250 .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state, 1237 1251 1238 1252 .xdo_dev_state_update_stats = mlx5e_xfrm_update_stats,
-11
drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
··· 565 565 xa_erase(&nn->xa_ipsec, x->xso.offload_handle - 1); 566 566 } 567 567 568 - static bool nfp_net_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x) 569 - { 570 - if (x->props.family == AF_INET) 571 - /* Offload with IPv4 options is not supported yet */ 572 - return ip_hdr(skb)->ihl == 5; 573 - 574 - /* Offload with IPv6 extension headers is not support yet */ 575 - return !(ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)); 576 - } 577 - 578 568 static const struct xfrmdev_ops nfp_net_ipsec_xfrmdev_ops = { 579 569 .xdo_dev_state_add = nfp_net_xfrm_add_state, 580 570 .xdo_dev_state_delete = nfp_net_xfrm_del_state, 581 - .xdo_dev_offload_ok = nfp_net_ipsec_offload_ok, 582 571 }; 583 572 584 573 void nfp_net_ipsec_init(struct nfp_net *nn)
-11
drivers/net/netdevsim/ipsec.c
··· 217 217 ipsec->count--; 218 218 } 219 219 220 - static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) 221 - { 222 - struct netdevsim *ns = netdev_priv(xs->xso.real_dev); 223 - struct nsim_ipsec *ipsec = &ns->ipsec; 224 - 225 - ipsec->ok++; 226 - 227 - return true; 228 - } 229 - 230 220 static const struct xfrmdev_ops nsim_xfrmdev_ops = { 231 221 .xdo_dev_state_add = nsim_ipsec_add_sa, 232 222 .xdo_dev_state_delete = nsim_ipsec_del_sa, 233 - .xdo_dev_offload_ok = nsim_ipsec_offload_ok, 234 223 }; 235 224 236 225 bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
-1
drivers/net/netdevsim/netdevsim.h
··· 54 54 struct dentry *pfile; 55 55 u32 count; 56 56 u32 tx; 57 - u32 ok; 58 57 }; 59 58 60 59 #define NSIM_MACSEC_MAX_SECY_COUNT 3
+19 -2
include/net/xfrm.h
··· 464 464 465 465 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family); 466 466 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family); 467 + void xfrm_set_type_offload(struct xfrm_state *x); 468 + static inline void xfrm_unset_type_offload(struct xfrm_state *x) 469 + { 470 + if (!x->type_offload) 471 + return; 472 + 473 + module_put(x->type_offload->owner); 474 + x->type_offload = NULL; 475 + } 467 476 468 477 /** 469 478 * struct xfrm_mode_cbs - XFRM mode callbacks ··· 1769 1760 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); 1770 1761 int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack); 1771 1762 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu); 1772 - int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, 1773 - struct netlink_ext_ack *extack); 1763 + int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack); 1774 1764 int xfrm_init_state(struct xfrm_state *x); 1775 1765 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); 1776 1766 int xfrm_input_resume(struct sk_buff *skb, int nexthdr); ··· 1781 1773 struct sk_buff *)); 1782 1774 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err); 1783 1775 int xfrm_output(struct sock *sk, struct sk_buff *skb); 1776 + int xfrm4_tunnel_check_size(struct sk_buff *skb); 1777 + #if IS_ENABLED(CONFIG_IPV6) 1778 + int xfrm6_tunnel_check_size(struct sk_buff *skb); 1779 + #else 1780 + static inline int xfrm6_tunnel_check_size(struct sk_buff *skb) 1781 + { 1782 + return -EMSGSIZE; 1783 + } 1784 + #endif 1784 1785 1785 1786 #if IS_ENABLED(CONFIG_NET_PKTGEN) 1786 1787 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
+34 -12
net/xfrm/xfrm_device.c
··· 244 244 xfrm_address_t *daddr; 245 245 bool is_packet_offload; 246 246 247 - if (!x->type_offload) { 248 - NL_SET_ERR_MSG(extack, "Type doesn't support offload"); 249 - return -EINVAL; 250 - } 251 - 252 247 if (xuo->flags & 253 248 ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) { 254 249 NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request"); ··· 305 310 return -EINVAL; 306 311 } 307 312 313 + xfrm_set_type_offload(x); 314 + if (!x->type_offload) { 315 + NL_SET_ERR_MSG(extack, "Type doesn't support offload"); 316 + dev_put(dev); 317 + return -EINVAL; 318 + } 319 + 308 320 xso->dev = dev; 309 321 netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC); 310 322 xso->real_dev = dev; ··· 334 332 netdev_put(dev, &xso->dev_tracker); 335 333 xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; 336 334 335 + xfrm_unset_type_offload(x); 337 336 /* User explicitly requested packet offload mode and configured 338 337 * policy in addition to the XFRM state. So be civil to users, 339 338 * and return an error instead of taking fallback path. 
··· 418 415 struct dst_entry *dst = skb_dst(skb); 419 416 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 420 417 struct net_device *dev = x->xso.dev; 418 + bool check_tunnel_size; 421 419 422 - if (!x->type_offload || 423 - (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap)) 420 + if (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED) 424 421 return false; 425 422 426 - if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET || 427 - ((!dev || (dev == xfrm_dst_path(dst)->dev)) && 428 - !xdst->child->xfrm)) { 423 + if ((dev == xfrm_dst_path(dst)->dev) && !xdst->child->xfrm) { 429 424 mtu = xfrm_state_mtu(x, xdst->child_mtu_cached); 430 425 if (skb->len <= mtu) 431 426 goto ok; ··· 435 434 return false; 436 435 437 436 ok: 438 - if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok) 439 - return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x); 437 + check_tunnel_size = x->xso.type == XFRM_DEV_OFFLOAD_PACKET && 438 + x->props.mode == XFRM_MODE_TUNNEL; 439 + switch (x->props.family) { 440 + case AF_INET: 441 + /* Check for IPv4 options */ 442 + if (ip_hdr(skb)->ihl != 5) 443 + return false; 444 + if (check_tunnel_size && xfrm4_tunnel_check_size(skb)) 445 + return false; 446 + break; 447 + case AF_INET6: 448 + /* Check for IPv6 extensions */ 449 + if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr)) 450 + return false; 451 + if (check_tunnel_size && xfrm6_tunnel_check_size(skb)) 452 + return false; 453 + break; 454 + default: 455 + break; 456 + } 457 + 458 + if (dev->xfrmdev_ops->xdo_dev_offload_ok) 459 + return dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x); 440 460 441 461 return true; 442 462 }
+4 -2
net/xfrm/xfrm_output.c
··· 827 827 } 828 828 EXPORT_SYMBOL_GPL(xfrm_output); 829 829 830 - static int xfrm4_tunnel_check_size(struct sk_buff *skb) 830 + int xfrm4_tunnel_check_size(struct sk_buff *skb) 831 831 { 832 832 int mtu, ret = 0; 833 833 ··· 853 853 out: 854 854 return ret; 855 855 } 856 + EXPORT_SYMBOL_GPL(xfrm4_tunnel_check_size); 856 857 857 858 static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb) 858 859 { ··· 876 875 } 877 876 878 877 #if IS_ENABLED(CONFIG_IPV6) 879 - static int xfrm6_tunnel_check_size(struct sk_buff *skb) 878 + int xfrm6_tunnel_check_size(struct sk_buff *skb) 880 879 { 881 880 int mtu, ret = 0; 882 881 struct dst_entry *dst = skb_dst(skb); ··· 906 905 out: 907 906 return ret; 908 907 } 908 + EXPORT_SYMBOL_GPL(xfrm6_tunnel_check_size); 909 909 #endif 910 910 911 911 static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
+1 -1
net/xfrm/xfrm_policy.c
··· 3294 3294 3295 3295 ok: 3296 3296 xfrm_pols_put(pols, drop_pols); 3297 - if (dst && dst->xfrm && 3297 + if (dst->xfrm && 3298 3298 (dst->xfrm->props.mode == XFRM_MODE_TUNNEL || 3299 3299 dst->xfrm->props.mode == XFRM_MODE_IPTFS)) 3300 3300 dst->flags |= DST_XFRM_TUNNEL;
+24 -30
net/xfrm/xfrm_state.c
··· 424 424 } 425 425 EXPORT_SYMBOL(xfrm_unregister_type_offload); 426 426 427 - static const struct xfrm_type_offload * 428 - xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load) 427 + void xfrm_set_type_offload(struct xfrm_state *x) 429 428 { 430 429 const struct xfrm_type_offload *type = NULL; 431 430 struct xfrm_state_afinfo *afinfo; 431 + bool try_load = true; 432 432 433 433 retry: 434 - afinfo = xfrm_state_get_afinfo(family); 434 + afinfo = xfrm_state_get_afinfo(x->props.family); 435 435 if (unlikely(afinfo == NULL)) 436 - return NULL; 436 + goto out; 437 437 438 - switch (proto) { 438 + switch (x->id.proto) { 439 439 case IPPROTO_ESP: 440 440 type = afinfo->type_offload_esp; 441 441 break; ··· 449 449 rcu_read_unlock(); 450 450 451 451 if (!type && try_load) { 452 - request_module("xfrm-offload-%d-%d", family, proto); 452 + request_module("xfrm-offload-%d-%d", x->props.family, 453 + x->id.proto); 453 454 try_load = false; 454 455 goto retry; 455 456 } 456 457 457 - return type; 458 + out: 459 + x->type_offload = type; 458 460 } 459 - 460 - static void xfrm_put_type_offload(const struct xfrm_type_offload *type) 461 - { 462 - module_put(type->owner); 463 - } 461 + EXPORT_SYMBOL(xfrm_set_type_offload); 464 462 465 463 static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = { 466 464 [XFRM_MODE_BEET] = { ··· 607 609 kfree(x->coaddr); 608 610 kfree(x->replay_esn); 609 611 kfree(x->preplay_esn); 610 - if (x->type_offload) 611 - xfrm_put_type_offload(x->type_offload); 612 612 if (x->type) { 613 613 x->type->destructor(x); 614 614 xfrm_put_type(x->type); ··· 779 783 { 780 784 struct xfrm_dev_offload *xso = &x->xso; 781 785 struct net_device *dev = READ_ONCE(xso->dev); 786 + 787 + xfrm_unset_type_offload(x); 782 788 783 789 if (dev && dev->xfrmdev_ops) { 784 790 spin_lock_bh(&xfrm_state_dev_gc_lock); ··· 2313 2315 struct xfrm_hash_state_ptrs state_ptrs; 2314 2316 struct xfrm_state *x; 2315 2317 2316 - spin_lock_bh(&net->xfrm.xfrm_state_lock); 
2318 + rcu_read_lock(); 2317 2319 2318 2320 xfrm_hash_ptrs_get(net, &state_ptrs); 2319 2321 2320 2322 x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family); 2321 - spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2323 + rcu_read_unlock(); 2322 2324 return x; 2323 2325 } 2324 2326 EXPORT_SYMBOL(xfrm_state_lookup_byaddr); ··· 3120 3122 } 3121 3123 EXPORT_SYMBOL_GPL(xfrm_state_mtu); 3122 3124 3123 - int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, 3124 - struct netlink_ext_ack *extack) 3125 + int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack) 3125 3126 { 3126 3127 const struct xfrm_mode *inner_mode; 3127 3128 const struct xfrm_mode *outer_mode; ··· 3175 3178 goto error; 3176 3179 } 3177 3180 3178 - x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload); 3179 - 3180 3181 err = x->type->init_state(x, extack); 3181 3182 if (err) 3182 3183 goto error; ··· 3187 3192 } 3188 3193 3189 3194 x->outer_mode = *outer_mode; 3190 - if (init_replay) { 3191 - err = xfrm_init_replay(x, extack); 3192 - if (err) 3193 - goto error; 3194 - } 3195 - 3196 3195 if (x->nat_keepalive_interval) { 3197 3196 if (x->dir != XFRM_SA_DIR_OUT) { 3198 3197 NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs"); ··· 3218 3229 { 3219 3230 int err; 3220 3231 3221 - err = __xfrm_init_state(x, true, false, NULL); 3222 - if (!err) 3223 - x->km.state = XFRM_STATE_VALID; 3232 + err = __xfrm_init_state(x, NULL); 3233 + if (err) 3234 + return err; 3224 3235 3225 - return err; 3236 + err = xfrm_init_replay(x, NULL); 3237 + if (err) 3238 + return err; 3239 + 3240 + x->km.state = XFRM_STATE_VALID; 3241 + return 0; 3226 3242 } 3227 3243 3228 3244 EXPORT_SYMBOL(xfrm_init_state);
+13 -1
net/xfrm/xfrm_user.c
··· 178 178 "Replay seq and seq_hi should be 0 for output SA"); 179 179 return -EINVAL; 180 180 } 181 + if (rs->oseq_hi && !(p->flags & XFRM_STATE_ESN)) { 182 + NL_SET_ERR_MSG( 183 + extack, 184 + "Replay oseq_hi should be 0 in non-ESN mode for output SA"); 185 + return -EINVAL; 186 + } 181 187 if (rs->bmp_len) { 182 188 NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA"); 183 189 return -EINVAL; ··· 194 188 if (rs->oseq || rs->oseq_hi) { 195 189 NL_SET_ERR_MSG(extack, 196 190 "Replay oseq and oseq_hi should be 0 for input SA"); 191 + return -EINVAL; 192 + } 193 + if (rs->seq_hi && !(p->flags & XFRM_STATE_ESN)) { 194 + NL_SET_ERR_MSG( 195 + extack, 196 + "Replay seq_hi should be 0 in non-ESN mode for input SA"); 197 197 return -EINVAL; 198 198 } 199 199 } ··· 919 907 goto error; 920 908 } 921 909 922 - err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack); 910 + err = __xfrm_init_state(x, extack); 923 911 if (err) 924 912 goto error; 925 913