Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: Allow skb_recycle_check to be done in stages

skb_recycle_check resets the skb if it's eligible for recycling.
However, there are times when a driver might want to optionally
manipulate the skb data before resetting the skb, but only after
it has determined eligibility. We do this by splitting the
eligibility check from the skb reset, creating two separate
functions to accomplish that task.

Signed-off-by: Andy Fleming <afleming@freescale.com>
Acked-by: David Daney <david.daney@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Andy Fleming and committed by
David S. Miller
3d153a7c 1e5c22cd

+47 -25
+21
include/linux/skbuff.h
··· 550 550 return __alloc_skb(size, priority, 1, NUMA_NO_NODE); 551 551 } 552 552 553 + extern void skb_recycle(struct sk_buff *skb); 553 554 extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); 554 555 555 556 extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src); ··· 2485 2484 2486 2485 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 2487 2486 2487 + static inline bool skb_is_recycleable(struct sk_buff *skb, int skb_size) 2488 + { 2489 + if (irqs_disabled()) 2490 + return false; 2491 + 2492 + if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) 2493 + return false; 2494 + 2495 + if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) 2496 + return false; 2497 + 2498 + skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 2499 + if (skb_end_pointer(skb) - skb->head < skb_size) 2500 + return false; 2501 + 2502 + if (skb_shared(skb) || skb_cloned(skb)) 2503 + return false; 2504 + 2505 + return true; 2506 + } 2488 2507 #endif /* __KERNEL__ */ 2489 2508 #endif /* _LINUX_SKBUFF_H */
+26 -25
net/core/skbuff.c
··· 485 485 EXPORT_SYMBOL(consume_skb); 486 486 487 487 /** 488 + * skb_recycle - clean up an skb for reuse 489 + * @skb: buffer 490 + * 491 + * Recycles the skb to be reused as a receive buffer. This 492 + * function does any necessary reference count dropping, and 493 + * cleans up the skbuff as if it just came from __alloc_skb(). 494 + */ 495 + void skb_recycle(struct sk_buff *skb) 496 + { 497 + struct skb_shared_info *shinfo; 498 + 499 + skb_release_head_state(skb); 500 + 501 + shinfo = skb_shinfo(skb); 502 + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 503 + atomic_set(&shinfo->dataref, 1); 504 + 505 + memset(skb, 0, offsetof(struct sk_buff, tail)); 506 + skb->data = skb->head + NET_SKB_PAD; 507 + skb_reset_tail_pointer(skb); 508 + } 509 + EXPORT_SYMBOL(skb_recycle); 510 + 511 + /** 488 512 * skb_recycle_check - check if skb can be reused for receive 489 513 * @skb: buffer 490 514 * @skb_size: minimum receive buffer size ··· 522 498 */ 523 499 bool skb_recycle_check(struct sk_buff *skb, int skb_size) 524 500 { 525 - struct skb_shared_info *shinfo; 526 - 527 - if (irqs_disabled()) 501 + if (!skb_is_recycleable(skb, skb_size)) 528 502 return false; 529 503 530 - if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) 531 - return false; 532 - 533 - if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE) 534 - return false; 535 - 536 - skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 537 - if (skb_end_pointer(skb) - skb->head < skb_size) 538 - return false; 539 - 540 - if (skb_shared(skb) || skb_cloned(skb)) 541 - return false; 542 - 543 - skb_release_head_state(skb); 544 - 545 - shinfo = skb_shinfo(skb); 546 - memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); 547 - atomic_set(&shinfo->dataref, 1); 548 - 549 - memset(skb, 0, offsetof(struct sk_buff, tail)); 550 - skb->data = skb->head + NET_SKB_PAD; 551 - skb_reset_tail_pointer(skb); 504 + skb_recycle(skb); 552 505 553 506 return true; 554 507 }