Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec

Steffen Klassert says:

====================
pull request (net): ipsec 2022-03-09

1) Fix IPv6 PMTU discovery for xfrm interfaces.
From Lina Wang.

2) Revert the change that rejected policies and states
configured with XFRMA_IF_ID 0, since it broke an existing
user configuration. From Kai Lueke.

3) Fix a possible buffer overflow in the ESP output path.

4) Fix ESP GSO for tunnel and BEET mode on inter-address-family
tunnels.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+70 -23
+2
include/linux/netdevice.h
···
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 				  netdev_features_t features, bool tx_path);
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+				    netdev_features_t features, __be16 type);
 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
 				    netdev_features_t features);
···
+2
include/net/esp.h
···
 #include <linux/skbuff.h>

+#define ESP_SKB_FRAG_MAXSIZE (PAGE_SIZE << SKB_FRAG_PAGE_ORDER)
+
 struct ip_esp_hdr;

 static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
···
+25
net/core/gro.c
···
 EXPORT_SYMBOL(dev_remove_offload);

 /**
+ * skb_eth_gso_segment - segmentation handler for ethernet protocols.
+ * @skb: buffer to segment
+ * @features: features for the output path (see dev->features)
+ * @type: Ethernet Protocol ID
+ */
+struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
+				    netdev_features_t features, __be16 type)
+{
+	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+	struct packet_offload *ptype;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ptype, &offload_base, list) {
+		if (ptype->type == type && ptype->callbacks.gso_segment) {
+			segs = ptype->callbacks.gso_segment(skb, features);
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	return segs;
+}
+EXPORT_SYMBOL(skb_eth_gso_segment);
+
+/**
  * skb_mac_gso_segment - mac layer segmentation handler.
  * @skb: buffer to segment
  * @features: features for the output path (see dev->features)
···
+5
net/ipv4/esp4.c
···
 	struct page *page;
 	struct sk_buff *trailer;
 	int tailen = esp->tailen;
+	unsigned int allocsz;

 	/* this is non-NULL only with TCP/UDP Encapsulation */
 	if (x->encap) {
···
 		if (err < 0)
 			return err;
 	}
+
+	allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
+	if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+		goto cow;

 	if (!skb_cloned(skb)) {
 		if (tailen <= skb_tailroom(skb)) {
···
+4 -2
net/ipv4/esp4_offload.c
···
 					struct sk_buff *skb,
 					netdev_features_t features)
 {
-	__skb_push(skb, skb->mac_len);
-	return skb_mac_gso_segment(skb, features);
+	return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
 }

 static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
···
 		if (proto == IPPROTO_TCP)
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 	}
+
+	if (proto == IPPROTO_IPV6)
+		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;

 	__skb_pull(skb, skb_transport_offset(skb));
 	ops = rcu_dereference(inet_offloads[proto]);
···
+5
net/ipv6/esp6.c
···
 	struct page *page;
 	struct sk_buff *trailer;
 	int tailen = esp->tailen;
+	unsigned int allocsz;

 	if (x->encap) {
 		int err = esp6_output_encap(x, skb, esp);
···
 		if (err < 0)
 			return err;
 	}
+
+	allocsz = ALIGN(skb->data_len + tailen, L1_CACHE_BYTES);
+	if (allocsz > ESP_SKB_FRAG_MAXSIZE)
+		goto cow;

 	if (!skb_cloned(skb)) {
 		if (tailen <= skb_tailroom(skb)) {
···
+4 -2
net/ipv6/esp6_offload.c
···
 					struct sk_buff *skb,
 					netdev_features_t features)
 {
-	__skb_push(skb, skb->mac_len);
-	return skb_mac_gso_segment(skb, features);
+	return skb_eth_gso_segment(skb, features, htons(ETH_P_IPV6));
 }

 static struct sk_buff *xfrm6_transport_gso_segment(struct xfrm_state *x,
···
 		skb->transport_header +=
 			ipv6_skip_exthdr(skb, 0, &proto, &frag);
 	}
+
+	if (proto == IPPROTO_IPIP)
+		skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;

 	__skb_pull(skb, skb_transport_offset(skb));
 	ops = rcu_dereference(inet6_offloads[proto]);
···
+16
net/ipv6/xfrm6_output.c
···
 	return xfrm_output(sk, skb);
 }

+static int xfrm6_noneed_fragment(struct sk_buff *skb)
+{
+	struct frag_hdr *fh;
+	u8 prevhdr = ipv6_hdr(skb)->nexthdr;
+
+	if (prevhdr != NEXTHDR_FRAGMENT)
+		return 0;
+	fh = (struct frag_hdr *)(skb->data + sizeof(struct ipv6hdr));
+	if (fh->nexthdr == NEXTHDR_ESP || fh->nexthdr == NEXTHDR_AUTH)
+		return 1;
+	return 0;
+}
+
 static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
···
 		xfrm6_local_rxpmtu(skb, mtu);
 		kfree_skb(skb);
 		return -EMSGSIZE;
+	} else if (toobig && xfrm6_noneed_fragment(skb)) {
+		skb->ignore_df = 1;
+		goto skip_frag;
 	} else if (!skb->ignore_df && toobig && skb->sk) {
 		xfrm_local_error(skb, mtu);
 		kfree_skb(skb);
···
+4 -1
net/xfrm/xfrm_interface.c
···
 		if (mtu < IPV6_MIN_MTU)
 			mtu = IPV6_MIN_MTU;

-		icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		if (skb->len > 1280)
+			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+		else
+			goto xmit;
 	} else {
 		if (!(ip_hdr(skb)->frag_off & htons(IP_DF)))
 			goto xmit;
···
+3 -18
net/xfrm/xfrm_user.c
···
 	xfrm_smark_init(attrs, &x->props.smark);

-	if (attrs[XFRMA_IF_ID]) {
+	if (attrs[XFRMA_IF_ID])
 		x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
-		if (!x->if_id) {
-			err = -EINVAL;
-			goto error;
-		}
-	}

 	err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
 	if (err)
···
 	mark = xfrm_mark_get(attrs, &m);

-	if (attrs[XFRMA_IF_ID]) {
+	if (attrs[XFRMA_IF_ID])
 		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
-		if (!if_id) {
-			err = -EINVAL;
-			goto out_noput;
-		}
-	}

 	if (p->info.seq) {
 		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
···
 	xfrm_mark_get(attrs, &xp->mark);

-	if (attrs[XFRMA_IF_ID]) {
+	if (attrs[XFRMA_IF_ID])
 		xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
-		if (!xp->if_id) {
-			err = -EINVAL;
-			goto error;
-		}
-	}

 	return xp;
 error:
···