// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);
static struct list_head offload_base __read_mostly = LIST_HEAD_INIT(offload_base);
/* Maximum number of GRO_NORMAL skbs to batch up for list-RX */
int gro_normal_batch __read_mostly = 8;

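/* Illustrative note (not part of the original file): gro_normal_batch is a
 * runtime tunable, so the batch size used before flushing GRO_NORMAL skbs to
 * the list-RX path can be changed via sysctl, roughly:
 *
 *	# sysctl -w net.core.gro_normal_batch=16
 *
 * The sysctl name is assumed from the variable above and the usual
 * net.core namespace.
 */
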
/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it can not
 * guarantee that all CPUs that are in the middle of receiving packets
 * will see the new offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);
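
/* Illustrative sketch (not part of the original file): a protocol typically
 * registers its offload handlers once at init time, roughly as IPv4 does for
 * ETH_P_IP. The my_* names below are hypothetical placeholders, not kernel
 * symbols:
 *
 *	static struct packet_offload my_packet_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_IP),
 *		.callbacks = {
 *			.gso_segment = my_gso_segment,
 *			.gro_receive = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	static int __init my_offload_init(void)
 *	{
 *		dev_add_offload(&my_packet_offload);
 *		return 0;
 *	}
 */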

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
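
/* Illustrative sketch (not part of the original file): the matching teardown
 * path for the hypothetical registration shown above. dev_remove_offload()
 * may sleep (it calls synchronize_net()), so it must run from process
 * context, e.g. module exit:
 *
 *	static void __exit my_offload_exit(void)
 *	{
 *		dev_remove_offload(&my_packet_offload);
 *	}
 */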

/**
 * skb_eth_gso_segment - segmentation handler for ethernet protocols.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 * @type: Ethernet Protocol ID
 */
struct sk_buff *skb_eth_gso_segment(struct sk_buff *skb,
				    netdev_features_t features, __be16 type)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_eth_gso_segment);
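
/* Illustrative sketch (not part of the original file): a caller that has
 * already determined the inner Ethernet protocol can hand segmentation off
 * to the matching offload. The helper name is hypothetical:
 *
 *	static struct sk_buff *my_segment_inner(struct sk_buff *skb,
 *						netdev_features_t features)
 *	{
 *		return skb_eth_gso_segment(skb, features, htons(ETH_P_IP));
 *	}
 */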

/**
 * skb_mac_gso_segment - mac layer segmentation handler.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 */
struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
				    netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
	struct packet_offload *ptype;
	int vlan_depth = skb->mac_len;
	__be16 type = skb_network_protocol(skb, &vlan_depth);

	if (unlikely(!type))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, vlan_depth);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type == type && ptype->callbacks.gso_segment) {
			segs = ptype->callbacks.gso_segment(skb, features);
			break;
		}
	}
	rcu_read_unlock();

	__skb_push(skb, skb->data - skb_mac_header(skb));

	return segs;
}
EXPORT_SYMBOL(skb_mac_gso_segment);
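
/* Illustrative sketch (not part of the original file): callers are expected
 * to have skb->data at the MAC header with skb->mac_len set; the helper
 * pulls to the network header, dispatches on the network protocol, and
 * restores the original data pointer before returning, so a caller can do
 * roughly:
 *
 *	segs = skb_mac_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;	// hypothetical error path
 */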

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;

	/* pairs with WRITE_ONCE() in netif_set_gro_max_size() */
	gro_max_size = READ_ONCE(p->dev->gro_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		__skb_frag_set_page(frag, page);
		skb_frag_off_set(frag, first_offset);
		skb_frag_size_set(frag, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - is completely transferred to the aggregated packet */
	skb->destructor = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}

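/* Illustrative sketch (not part of the original file): skb_gro_receive() is
 * the merge primitive that protocol ->gro_receive handlers (TCP's, for
 * instance) use once they have located an earlier packet 'p' of the same
 * flow on the GRO list, roughly:
 *
 *	list_for_each_entry(p, head, list) {
 *		if (!NAPI_GRO_CB(p)->same_flow)
 *			continue;
 *		// ... protocol-specific header comparison ...
 *		ret = skb_gro_receive(p, skb);	// 0 on merge, -E2BIG if it will not fit
 *		break;
 *	}
 */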

static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct list_head *head = &offload_base;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb);
		if (skb_vlan_tag_present(p))
			diffs |= skb_vlan_tag_get(p) ^ skb_vlan_tag_get(skb);
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0;
		 * otherwise we are already on some slower paths, so
		 * either skip all the infrequent tests altogether or
		 * avoid trying too hard to skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			struct tc_skb_ext *skb_ext;
			struct tc_skb_ext *p_ext;
#endif

			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

#if IS_ENABLED(CONFIG_SKB_EXTENSIONS) && IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
			skb_ext = skb_ext_find(skb, TC_SKB_EXT);
			p_ext = skb_ext_find(p, TC_SKB_EXT);

			diffs |= (!!p_ext) ^ (!!skb_ext);
			if (!diffs && unlikely(skb_ext))
				diffs |= p_ext->chain ^ skb_ext->chain;
#endif
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo = skb_shinfo(skb);
	const skb_frag_t *frag0 = &pinfo->frags[0];

	NAPI_GRO_CB(skb)->data_offset = 0;
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;

	if (!skb_headlen(skb) && pinfo->nr_frags &&
	    !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;
	int grow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;

		skb_set_network_header(skb, skb_gro_offset(skb));
		skb_reset_mac_len(skb);
		NAPI_GRO_CB(skb)->same_flow = 0;
		NAPI_GRO_CB(skb)->flush = skb_is_gso(skb) || skb_has_frag_list(skb);
		NAPI_GRO_CB(skb)->free = 0;
		NAPI_GRO_CB(skb)->encap_mark = 0;
		NAPI_GRO_CB(skb)->recursion_counter = 0;
		NAPI_GRO_CB(skb)->is_fou = 0;
		NAPI_GRO_CB(skb)->is_atomic = 1;
		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;

		/* Setup for GRO checksum validation */
		switch (skb->ip_summed) {
		case CHECKSUM_COMPLETE:
			NAPI_GRO_CB(skb)->csum = skb->csum;
			NAPI_GRO_CB(skb)->csum_valid = 1;
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			break;
		case CHECKSUM_UNNECESSARY:
			NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
			NAPI_GRO_CB(skb)->csum_valid = 0;
			break;
		default:
			NAPI_GRO_CB(skb)->csum_cnt = 0;
			NAPI_GRO_CB(skb)->csum_valid = 0;
		}

		pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
					ipv6_gro_receive, inet_gro_receive,
					&gro_list->list, skb);
		break;
	}
	rcu_read_unlock();

	if (&ptype->list == head)
		goto normal;

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	NAPI_GRO_CB(skb)->count = 1;
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;

pull:
	grow = skb_gro_offset(skb) - skb_headlen(skb);
	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	goto pull;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
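
/* Illustrative sketch (not part of the original file): encapsulation GRO
 * handlers use these lookups to dispatch to the inner protocol's callbacks
 * from their own ->gro_receive path. Variable names here are hypothetical:
 *
 *	ptype = gro_find_receive_by_type(inner_proto);
 *	if (!ptype) {
 *		NAPI_GRO_CB(skb)->flush = 1;	// no offload for the inner proto
 *		return NULL;
 *	}
 *	pp = ptype->callbacks.gro_receive(head, skb);
 *
 * and symmetrically gro_find_complete_by_type() from the ->gro_complete path.
 */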

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__kfree_skb_defer(skb);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
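
/* Illustrative sketch (not part of the original file): napi_gro_receive() is
 * the usual entry point for drivers delivering completed RX skbs from their
 * NAPI poll callback. Everything prefixed mydrv_ is a hypothetical driver
 * helper, not a kernel API:
 *
 *	static int mydrv_poll(struct napi_struct *napi, int budget)
 *	{
 *		int work_done = 0;
 *		struct sk_buff *skb;
 *
 *		while (work_done < budget &&
 *		       (skb = mydrv_next_rx_skb(napi)) != NULL) {
 *			napi_gro_receive(napi, skb);
 *			work_done++;
 *		}
 *
 *		if (work_done < budget)
 *			napi_complete_done(napi, work_done);
 *		return work_done;
 *	}
 */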

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
 * we copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(skb_gro_header_hard(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;
		gro_pull_from_frag0(skb, hlen);
		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
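
/* Illustrative sketch (not part of the original file): page-based drivers
 * pair napi_get_frags() with napi_gro_frags() instead of building the skb
 * themselves. The page/offset/len variables below are hypothetical values
 * taken from a driver's RX descriptor:
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		return;		// out of memory, drop this descriptor
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 *			page, offset, len, PAGE_SIZE);
 *
 *	napi_gro_frags(napi);
 */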

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
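
/* Illustrative sketch (not part of the original file): protocol gro_receive
 * handlers normally do not call this directly; they go through the
 * skb_gro_checksum_validate() family of helpers, which fall back to
 * __skb_gro_checksum_complete() when no usable hardware checksum is
 * available, roughly as the TCP/IPv4 path does:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo)) {
 *		NAPI_GRO_CB(skb)->flush = 1;	// bad checksum, don't merge
 *		return NULL;
 *	}
 */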