Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

udp: add udp gso

Implement generic segmentation offload support for udp datagrams. A
follow-up patch adds support to the protocol stack to generate such
packets.

UDP GSO is not UFO. UFO fragments a single large datagram. GSO splits
a large payload into a number of discrete UDP datagrams.

The implementation adds a GSO type SKB_GSO_UDP_L4 to differentiate it
from UFO (SKB_GSO_UDP).

IPPROTO_UDPLITE is excluded, as that protocol has no gso handler
registered.

[ Export __udp_gso_segment for ipv6. -DaveM ]

Signed-off-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Willem de Bruijn and committed by
David S. Miller
ee80d1eb 1cd7884d

+82 -4
+2
include/linux/skbuff.h
··· 573 573 SKB_GSO_ESP = 1 << 15, 574 574 575 575 SKB_GSO_UDP = 1 << 16, 576 + 577 + SKB_GSO_UDP_L4 = 1 << 17, 576 578 }; 577 579 578 580 #if BITS_PER_LONG > 32
+4
include/net/udp.h
··· 174 174 struct udphdr *uh, udp_lookup_t lookup); 175 175 int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup); 176 176 177 + struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, 178 + netdev_features_t features, 179 + unsigned int mss, __sum16 check); 180 + 177 181 static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb) 178 182 { 179 183 struct udphdr *uh;
+2
net/core/skbuff.c
··· 4940 4940 thlen = tcp_hdrlen(skb); 4941 4941 } else if (unlikely(skb_is_gso_sctp(skb))) { 4942 4942 thlen = sizeof(struct sctphdr); 4943 + } else if (shinfo->gso_type & SKB_GSO_UDP_L4) { 4944 + thlen = sizeof(struct udphdr); 4943 4945 } 4944 4946 /* UFO sets gso_size to the size of the fragmentation 4945 4947 * payload, i.e. the size of the L4 (UDP) header is already
+52 -1
net/ipv4/udp_offload.c
··· 187 187 } 188 188 EXPORT_SYMBOL(skb_udp_tunnel_segment); 189 189 190 + struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, 191 + netdev_features_t features, 192 + unsigned int mss, __sum16 check) 193 + { 194 + struct sk_buff *segs, *seg; 195 + unsigned int hdrlen; 196 + struct udphdr *uh; 197 + 198 + if (gso_skb->len <= sizeof(*uh) + mss) 199 + return ERR_PTR(-EINVAL); 200 + 201 + hdrlen = gso_skb->data - skb_mac_header(gso_skb); 202 + skb_pull(gso_skb, sizeof(*uh)); 203 + 204 + segs = skb_segment(gso_skb, features); 205 + if (unlikely(IS_ERR_OR_NULL(segs))) 206 + return segs; 207 + 208 + for (seg = segs; seg; seg = seg->next) { 209 + uh = udp_hdr(seg); 210 + uh->len = htons(seg->len - hdrlen); 211 + uh->check = check; 212 + 213 + /* last packet can be partial gso_size */ 214 + if (!seg->next) 215 + csum_replace2(&uh->check, htons(mss), 216 + htons(seg->len - hdrlen - sizeof(*uh))); 217 + } 218 + 219 + return segs; 220 + } 221 + EXPORT_SYMBOL_GPL(__udp_gso_segment); 222 + 223 + static struct sk_buff *__udp4_gso_segment(struct sk_buff *gso_skb, 224 + netdev_features_t features) 225 + { 226 + const struct iphdr *iph = ip_hdr(gso_skb); 227 + unsigned int mss = skb_shinfo(gso_skb)->gso_size; 228 + 229 + if (!can_checksum_protocol(features, htons(ETH_P_IP))) 230 + return ERR_PTR(-EIO); 231 + 232 + return __udp_gso_segment(gso_skb, features, mss, 233 + udp_v4_check(sizeof(struct udphdr) + mss, 234 + iph->saddr, iph->daddr, 0)); 235 + } 236 + EXPORT_SYMBOL_GPL(__udp4_gso_segment); 237 + 190 238 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, 191 239 netdev_features_t features) 192 240 { ··· 251 203 goto out; 252 204 } 253 205 254 - if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP)) 206 + if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4))) 255 207 goto out; 256 208 257 209 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 258 210 goto out; 211 + 212 + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) 213 + return __udp4_gso_segment(skb, features);
+4 -2
net/ipv6/ip6_offload.c
··· 88 88 89 89 if (skb->encapsulation && 90 90 skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6)) 91 - udpfrag = proto == IPPROTO_UDP && encap; 91 + udpfrag = proto == IPPROTO_UDP && encap && 92 + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP); 92 93 else 93 - udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; 94 + udpfrag = proto == IPPROTO_UDP && !skb->encapsulation && 95 + (skb_shinfo(skb)->gso_type & SKB_GSO_UDP); 94 96 95 97 ops = rcu_dereference(inet6_offloads[proto]); 96 98 if (likely(ops && ops->callbacks.gso_segment)) {
+18 -1
net/ipv6/udp_offload.c
··· 17 17 #include <net/ip6_checksum.h> 18 18 #include "ip6_offload.h" 19 19 20 + static struct sk_buff *__udp6_gso_segment(struct sk_buff *gso_skb, 21 + netdev_features_t features) 22 + { 23 + const struct ipv6hdr *ip6h = ipv6_hdr(gso_skb); 24 + unsigned int mss = skb_shinfo(gso_skb)->gso_size; 25 + 26 + if (!can_checksum_protocol(features, htons(ETH_P_IPV6))) 27 + return ERR_PTR(-EIO); 28 + 29 + return __udp_gso_segment(gso_skb, features, mss, 30 + udp_v6_check(sizeof(struct udphdr) + mss, 31 + &ip6h->saddr, &ip6h->daddr, 0)); 32 + } 33 + 20 34 static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, 21 35 netdev_features_t features) 22 36 { ··· 56 42 const struct ipv6hdr *ipv6h; 57 43 struct udphdr *uh; 58 44 59 - if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP)) 45 + if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4))) 60 46 goto out; 61 47 62 48 if (!pskb_may_pull(skb, sizeof(struct udphdr))) 63 49 goto out; 50 + 51 + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) 52 + return __udp6_gso_segment(skb, features); 64 53 65 54 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot 66 55 * do checksum of UDP packets sent as multiple IP fragments.