Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 */

#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/printk.h>

#include <net/protocol.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/gro.h>

#include "ip6_offload.h"

/* All GRO functions are always builtin, except UDP over ipv6, which lives in
 * the ipv6 module, as it depends on the UDPv6 lookup function, so we need
 * special care when ipv6 is built as a module.
 */
#if IS_BUILTIN(CONFIG_IPV6)
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__)
#else
#define INDIRECT_CALL_L4(f, f2, f1, ...) INDIRECT_CALL_1(f, f2, __VA_ARGS__)
#endif

#define indirect_call_gro_receive_l4(f2, f1, cb, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_L4(cb, f2, f1, head, skb);	\
})
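/* Illustrative sketch only: with CONFIG_IPV6=y and retpolines enabled, a call
 * such as
 *
 *	indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
 *				     ops->callbacks.gro_receive, head, skb)
 *
 * behaves roughly like
 *
 *	if (unlikely(gro_recursion_inc_test(skb))) {
 *		NAPI_GRO_CB(skb)->flush |= 1;
 *		pp = NULL;
 *	} else if (ops->callbacks.gro_receive == tcp6_gro_receive) {
 *		pp = tcp6_gro_receive(head, skb);
 *	} else if (ops->callbacks.gro_receive == udp6_gro_receive) {
 *		pp = udp6_gro_receive(head, skb);
 *	} else {
 *		pp = ops->callbacks.gro_receive(head, skb);
 *	}
 *
 * With CONFIG_IPV6=m only the tcp6_gro_receive comparison is emitted, since
 * udp6_gro_receive lives in the ipv6 module; without retpolines the plain
 * indirect call is used instead.
 */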

static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
	const struct net_offload *ops = NULL;

	for (;;) {
		struct ipv6_opt_hdr *opth;
		int len;

		if (proto != NEXTHDR_HOP) {
			ops = rcu_dereference(inet6_offloads[proto]);

			if (unlikely(!ops))
				break;

			if (!(ops->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}

		if (unlikely(!pskb_may_pull(skb, 8)))
			break;

		opth = (void *)skb->data;
		len = ipv6_optlen(opth);

		if (unlikely(!pskb_may_pull(skb, len)))
			break;

		opth = (void *)skb->data;
		proto = opth->nexthdr;
		__skb_pull(skb, len);
	}

	return proto;
}
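/* Worked example (illustrative): every extension header walked above starts
 * with a nexthdr byte and a hdrlen byte, and ipv6_optlen() evaluates to
 * (hdrlen + 1) << 3.  A routing header with hdrlen == 2 is therefore
 * (2 + 1) * 8 = 24 bytes long, which is why the loop first pulls 8 bytes
 * (enough to read hdrlen) and only then pulls the full header length before
 * advancing to the next header.
 */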

static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct ipv6hdr *ipv6h;
	const struct net_offload *ops;
	int proto;
	struct frag_hdr *fptr;
	unsigned int payload_len;
	u8 *prevhdr;
	int offset = 0;
	bool encap, udpfrag;
	int nhoff;
	bool gso_partial;

	skb_reset_network_header(skb);
	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
		goto out;

	encap = SKB_GSO_CB(skb)->encap_level > 0;
	if (encap)
		features &= skb->dev->hw_enc_features;
	SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);

	ipv6h = ipv6_hdr(skb);
	__skb_pull(skb, sizeof(*ipv6h));
	segs = ERR_PTR(-EPROTONOSUPPORT);

	proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);

	if (skb->encapsulation &&
	    skb_shinfo(skb)->gso_type & (SKB_GSO_IPXIP4 | SKB_GSO_IPXIP6))
		udpfrag = proto == IPPROTO_UDP && encap &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
	else
		udpfrag = proto == IPPROTO_UDP && !skb->encapsulation &&
			  (skb_shinfo(skb)->gso_type & SKB_GSO_UDP);

	ops = rcu_dereference(inet6_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment)) {
		skb_reset_transport_header(skb);
		segs = ops->callbacks.gso_segment(skb, features);
		if (!segs)
			skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
	}

	if (IS_ERR_OR_NULL(segs))
		goto out;

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	for (skb = segs; skb; skb = skb->next) {
		ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
		if (gso_partial && skb_is_gso(skb))
			payload_len = skb_shinfo(skb)->gso_size +
				      SKB_GSO_CB(skb)->data_offset +
				      skb->head - (unsigned char *)(ipv6h + 1);
		else
			payload_len = skb->len - nhoff - sizeof(*ipv6h);
		ipv6h->payload_len = htons(payload_len);
		skb->network_header = (u8 *)ipv6h - skb->head;
		skb_reset_mac_len(skb);

		if (udpfrag) {
			int err = ip6_find_1stfragopt(skb, &prevhdr);

			if (err < 0) {
				kfree_skb_list(segs);
				return ERR_PTR(err);
			}
			fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
			fptr->frag_off = htons(offset);
			if (skb->next)
				fptr->frag_off |= htons(IP6_MF);
			offset += (ntohs(ipv6h->payload_len) -
				   sizeof(struct frag_hdr));
		}
		if (encap)
			skb_reset_inner_headers(skb);
	}

out:
	return segs;
}
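/* Worked example (illustrative numbers): for a UDP skb segmented by the
 * udpfrag path above into three fragments whose IPv6 payload_len (including
 * the 8-byte fragment header) is 1448, the loop writes:
 *
 *	seg 1: frag_off = htons(0 | IP6_MF),	then offset = 1448 - 8 = 1440
 *	seg 2: frag_off = htons(1440 | IP6_MF),	then offset = 2880
 *	seg 3: frag_off = htons(2880)		(last segment, no IP6_MF)
 *
 * i.e. the byte offset advances by payload_len minus the fragment header,
 * and IP6_MF is set on every fragment except the last.
 */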

/* Return the total length of all the extension hdrs, following the same
 * logic as ipv6_gso_pull_exthdrs() when parsing ext-hdrs.
 */
static int ipv6_exthdrs_len(struct ipv6hdr *iph,
			    const struct net_offload **opps)
{
	struct ipv6_opt_hdr *opth = (void *)iph;
	int len = 0, proto, optlen = sizeof(*iph);

	proto = iph->nexthdr;
	for (;;) {
		if (proto != NEXTHDR_HOP) {
			*opps = rcu_dereference(inet6_offloads[proto]);
			if (unlikely(!(*opps)))
				break;
			if (!((*opps)->flags & INET6_PROTO_GSO_EXTHDR))
				break;
		}
		opth = (void *)opth + optlen;
		optlen = ipv6_optlen(opth);
		len += optlen;
		proto = opth->nexthdr;
	}
	return len;
}
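/* Worked example (illustrative): for a packet laid out as
 *
 *	IPv6 hdr | Hop-by-Hop (hdrlen 0) | Dest opts (hdrlen 1) | TCP
 *
 * the loop above adds ipv6_optlen() of the Hop-by-Hop header
 * ((0 + 1) * 8 = 8) and of the destination options header
 * ((1 + 1) * 8 = 16), then stops at TCP because the TCP offload does not
 * carry INET6_PROTO_GSO_EXTHDR, returning len = 24 with *opps left pointing
 * at the TCP net_offload.
 */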

INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
							  struct sk_buff *skb)
{
	const struct net_offload *ops;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct ipv6hdr *iph;
	unsigned int nlen;
	unsigned int hlen;
	unsigned int off;
	u16 flush = 1;
	int proto;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*iph);
	iph = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		iph = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!iph))
			goto out;
	}

	skb_set_network_header(skb, off);
	skb_gro_pull(skb, sizeof(*iph));
	skb_set_transport_header(skb, skb_gro_offset(skb));

	flush += ntohs(iph->payload_len) != skb_gro_len(skb);

	proto = iph->nexthdr;
	ops = rcu_dereference(inet6_offloads[proto]);
	if (!ops || !ops->callbacks.gro_receive) {
		__pskb_pull(skb, skb_gro_offset(skb));
		skb_gro_frag0_invalidate(skb);
		proto = ipv6_gso_pull_exthdrs(skb, proto);
		skb_gro_pull(skb, -skb_transport_offset(skb));
		skb_reset_transport_header(skb);
		__skb_push(skb, skb_gro_offset(skb));

		ops = rcu_dereference(inet6_offloads[proto]);
		if (!ops || !ops->callbacks.gro_receive)
			goto out;

		iph = ipv6_hdr(skb);
	}

	NAPI_GRO_CB(skb)->proto = proto;

	flush--;
	nlen = skb_network_header_len(skb);

	list_for_each_entry(p, head, list) {
		const struct ipv6hdr *iph2;
		__be32 first_word; /* <Version:4><Traffic_Class:8><Flow_Label:20> */

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		iph2 = (struct ipv6hdr *)(p->data + off);
		first_word = *(__be32 *)iph ^ *(__be32 *)iph2;

		/* All fields must match except length and Traffic Class.
		 * XXX skbs on the gro_list have all been parsed and pulled
		 * already so we don't need to compare nlen
		 * (nlen != (sizeof(*iph2) + ipv6_exthdrs_len(iph2, &ops)))
		 * memcmp() alone below is sufficient, right?
		 */
		if ((first_word & htonl(0xF00FFFFF)) ||
		    !ipv6_addr_equal(&iph->saddr, &iph2->saddr) ||
		    !ipv6_addr_equal(&iph->daddr, &iph2->daddr) ||
		    *(u16 *)&iph->nexthdr != *(u16 *)&iph2->nexthdr) {
not_same_flow:
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
		if (unlikely(nlen > sizeof(struct ipv6hdr))) {
			if (memcmp(iph + 1, iph2 + 1,
				   nlen - sizeof(struct ipv6hdr)))
				goto not_same_flow;
		}
		/* flush if Traffic Class fields are different */
		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
		NAPI_GRO_CB(p)->flush |= flush;

		/* If the previous IP ID value was based on an atomic
		 * datagram we can overwrite the value and ignore it.
		 */
		if (NAPI_GRO_CB(skb)->is_atomic)
			NAPI_GRO_CB(p)->flush_id = 0;
	}

	NAPI_GRO_CB(skb)->is_atomic = true;
	NAPI_GRO_CB(skb)->flush |= flush;

	skb_gro_postpull_rcsum(skb, iph, nlen);

	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
					  ops->callbacks.gro_receive, head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
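/* For reference (a rough note): the first 32 bits of the IPv6 header are
 * <Version:4><Traffic Class:8><Flow Label:20>, so in the XOR'd first_word
 * above
 *
 *	htonl(0xF00FFFFF) selects Version + Flow Label (must match, otherwise
 *	                  the packets are not the same flow)
 *	htonl(0x0FF00000) selects Traffic Class (a mismatch only sets flush)
 *
 * i.e. packets differing only in Traffic Class are still recognised as the
 * same flow, but the already-held packet is flushed rather than grown, so
 * the Traffic Class change is not hidden by coalescing.
 */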

static struct sk_buff *sit_ip6ip6_gro_receive(struct list_head *head,
					      struct sk_buff *skb)
{
	/* Common GRO receive for SIT and IP6IP6 */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return ipv6_gro_receive(head, skb);
}

static struct sk_buff *ip4ip6_gro_receive(struct list_head *head,
					  struct sk_buff *skb)
{
	/* GRO receive for IPv4 encapsulated in IPv6 (ip4ip6) */

	if (NAPI_GRO_CB(skb)->encap_mark) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	NAPI_GRO_CB(skb)->encap_mark = 1;

	return inet_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out;

	err = INDIRECT_CALL_L4(ops->callbacks.gro_complete, tcp6_gro_complete,
			       udp6_gro_complete, skb, nhoff);

out:
	return err;
}
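/* Worked example (illustrative numbers): if GRO coalesced two TCP segments
 * each carrying 1448 bytes of payload behind a 32-byte TCP header (with
 * timestamps) and no extension headers, then skb->len - nhoff - 40 is
 * 32 + 2 * 1448 = 2928, and the rewrite above makes payload_len describe
 * the whole coalesced packet before it is handed up the stack.
 */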

static int sit_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP4;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip6ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return ipv6_gro_complete(skb, nhoff);
}

static int ip4ip6_gro_complete(struct sk_buff *skb, int nhoff)
{
	skb->encapsulation = 1;
	skb_shinfo(skb)->gso_type |= SKB_GSO_IPXIP6;
	return inet_gro_complete(skb, nhoff);
}

static struct packet_offload ipv6_packet_offload __read_mostly = {
	.type = cpu_to_be16(ETH_P_IPV6),
	.callbacks = {
		.gso_segment = ipv6_gso_segment,
		.gro_receive = ipv6_gro_receive,
		.gro_complete = ipv6_gro_complete,
	},
};

static struct sk_buff *sit_gso_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP4))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static struct sk_buff *ip4ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return inet_gso_segment(skb, features);
}

static struct sk_buff *ip6ip6_gso_segment(struct sk_buff *skb,
					  netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_IPXIP6))
		return ERR_PTR(-EINVAL);

	return ipv6_gso_segment(skb, features);
}

static const struct net_offload sit_offload = {
	.callbacks = {
		.gso_segment = sit_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = sit_gro_complete,
	},
};

static const struct net_offload ip4ip6_offload = {
	.callbacks = {
		.gso_segment = ip4ip6_gso_segment,
		.gro_receive = ip4ip6_gro_receive,
		.gro_complete = ip4ip6_gro_complete,
	},
};

static const struct net_offload ip6ip6_offload = {
	.callbacks = {
		.gso_segment = ip6ip6_gso_segment,
		.gro_receive = sit_ip6ip6_gro_receive,
		.gro_complete = ip6ip6_gro_complete,
	},
};

static int __init ipv6_offload_init(void)
{
	if (tcpv6_offload_init() < 0)
		pr_crit("%s: Cannot add TCP protocol offload\n", __func__);
	if (ipv6_exthdrs_offload_init() < 0)
		pr_crit("%s: Cannot add EXTHDRS protocol offload\n", __func__);

	dev_add_offload(&ipv6_packet_offload);

	inet_add_offload(&sit_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip6ip6_offload, IPPROTO_IPV6);
	inet6_add_offload(&ip4ip6_offload, IPPROTO_IPIP);

	return 0;
}

fs_initcall(ipv6_offload_init);