Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: Move generic skbuff stuff from XFRM code to generic code

Move generic skbuff stuff from XFRM code to generic code so that
AF_RXRPC can use it too.

The kdoc comments I've attached to the functions need to be checked
by whoever wrote them as I had to make some guesses about the workings
of these functions.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

David Howells and committed by
David S. Miller
716ea3a7 926554c4

+194 -171
+6
include/linux/skbuff.h
··· 86 86 */ 87 87 88 88 struct net_device; 89 + struct scatterlist; 89 90 90 91 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 91 92 struct nf_conntrack { ··· 348 347 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 349 348 int newheadroom, int newtailroom, 350 349 gfp_t priority); 350 + extern int skb_to_sgvec(struct sk_buff *skb, 351 + struct scatterlist *sg, int offset, 352 + int len); 353 + extern int skb_cow_data(struct sk_buff *skb, int tailbits, 354 + struct sk_buff **trailer); 351 355 extern int skb_pad(struct sk_buff *skb, int pad); 352 356 #define dev_kfree_skb(a) kfree_skb(a) 353 357 extern void skb_over_panic(struct sk_buff *skb, int len,
-2
include/net/esp.h
··· 40 40 } auth; 41 41 }; 42 42 43 - extern int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len); 44 - extern int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer); 45 43 extern void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len); 46 44 47 45 static inline int esp_mac_digest(struct esp_data *esp, struct sk_buff *skb,
+188
net/core/skbuff.c
··· 55 55 #include <linux/cache.h> 56 56 #include <linux/rtnetlink.h> 57 57 #include <linux/init.h> 58 + #include <linux/scatterlist.h> 58 59 59 60 #include <net/protocol.h> 60 61 #include <net/dst.h> ··· 2003 2002 NULL, NULL); 2004 2003 } 2005 2004 2005 + /** 2006 + * skb_to_sgvec - Fill a scatter-gather list from a socket buffer 2007 + * @skb: Socket buffer containing the buffers to be mapped 2008 + * @sg: The scatter-gather list to map into 2009 + * @offset: The offset into the buffer's contents to start mapping 2010 + * @len: Length of buffer space to be mapped 2011 + * 2012 + * Fill the specified scatter-gather list with mappings/pointers into a 2013 + * region of the buffer space attached to a socket buffer. 2014 + */ 2015 + int 2016 + skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 2017 + { 2018 + int start = skb_headlen(skb); 2019 + int i, copy = start - offset; 2020 + int elt = 0; 2021 + 2022 + if (copy > 0) { 2023 + if (copy > len) 2024 + copy = len; 2025 + sg[elt].page = virt_to_page(skb->data + offset); 2026 + sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; 2027 + sg[elt].length = copy; 2028 + elt++; 2029 + if ((len -= copy) == 0) 2030 + return elt; 2031 + offset += copy; 2032 + } 2033 + 2034 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2035 + int end; 2036 + 2037 + BUG_TRAP(start <= offset + len); 2038 + 2039 + end = start + skb_shinfo(skb)->frags[i].size; 2040 + if ((copy = end - offset) > 0) { 2041 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2042 + 2043 + if (copy > len) 2044 + copy = len; 2045 + sg[elt].page = frag->page; 2046 + sg[elt].offset = frag->page_offset+offset-start; 2047 + sg[elt].length = copy; 2048 + elt++; 2049 + if (!(len -= copy)) 2050 + return elt; 2051 + offset += copy; 2052 + } 2053 + start = end; 2054 + } 2055 + 2056 + if (skb_shinfo(skb)->frag_list) { 2057 + struct sk_buff *list = skb_shinfo(skb)->frag_list; 2058 + 2059 + for (; list; list = list->next) { 2060 + int 
end; 2061 + 2062 + BUG_TRAP(start <= offset + len); 2063 + 2064 + end = start + list->len; 2065 + if ((copy = end - offset) > 0) { 2066 + if (copy > len) 2067 + copy = len; 2068 + elt += skb_to_sgvec(list, sg+elt, offset - start, copy); 2069 + if ((len -= copy) == 0) 2070 + return elt; 2071 + offset += copy; 2072 + } 2073 + start = end; 2074 + } 2075 + } 2076 + BUG_ON(len); 2077 + return elt; 2078 + } 2079 + 2080 + /** 2081 + * skb_cow_data - Check that a socket buffer's data buffers are writable 2082 + * @skb: The socket buffer to check. 2083 + * @tailbits: Amount of trailing space to be added 2084 + * @trailer: Returned pointer to the skb where the @tailbits space begins 2085 + * 2086 + * Make sure that the data buffers attached to a socket buffer are 2087 + * writable. If they are not, private copies are made of the data buffers 2088 + * and the socket buffer is set to use these instead. 2089 + * 2090 + * If @tailbits is given, make sure that there is space to write @tailbits 2091 + * bytes of data beyond current end of socket buffer. @trailer will be 2092 + * set to point to the skb in which this space begins. 2093 + * 2094 + * The number of scatterlist elements required to completely map the 2095 + * COW'd and extended socket buffer will be returned. 2096 + */ 2097 + int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 2098 + { 2099 + int copyflag; 2100 + int elt; 2101 + struct sk_buff *skb1, **skb_p; 2102 + 2103 + /* If skb is cloned or its head is paged, reallocate 2104 + * head pulling out all the pages (pages are considered not writable 2105 + * at the moment even if they are anonymous). 2106 + */ 2107 + if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 2108 + __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 2109 + return -ENOMEM; 2110 + 2111 + /* Easy case. Most of packets will go this way. */ 2112 + if (!skb_shinfo(skb)->frag_list) { 2113 + /* A little of trouble, not enough of space for trailer. 
2114 + * This should not happen, when stack is tuned to generate 2115 + * good frames. OK, on miss we reallocate and reserve even more 2116 + * space, 128 bytes is fair. */ 2117 + 2118 + if (skb_tailroom(skb) < tailbits && 2119 + pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 2120 + return -ENOMEM; 2121 + 2122 + /* Voila! */ 2123 + *trailer = skb; 2124 + return 1; 2125 + } 2126 + 2127 + /* Misery. We are in troubles, going to mincer fragments... */ 2128 + 2129 + elt = 1; 2130 + skb_p = &skb_shinfo(skb)->frag_list; 2131 + copyflag = 0; 2132 + 2133 + while ((skb1 = *skb_p) != NULL) { 2134 + int ntail = 0; 2135 + 2136 + /* The fragment is partially pulled by someone, 2137 + * this can happen on input. Copy it and everything 2138 + * after it. */ 2139 + 2140 + if (skb_shared(skb1)) 2141 + copyflag = 1; 2142 + 2143 + /* If the skb is the last, worry about trailer. */ 2144 + 2145 + if (skb1->next == NULL && tailbits) { 2146 + if (skb_shinfo(skb1)->nr_frags || 2147 + skb_shinfo(skb1)->frag_list || 2148 + skb_tailroom(skb1) < tailbits) 2149 + ntail = tailbits + 128; 2150 + } 2151 + 2152 + if (copyflag || 2153 + skb_cloned(skb1) || 2154 + ntail || 2155 + skb_shinfo(skb1)->nr_frags || 2156 + skb_shinfo(skb1)->frag_list) { 2157 + struct sk_buff *skb2; 2158 + 2159 + /* Fuck, we are miserable poor guys... */ 2160 + if (ntail == 0) 2161 + skb2 = skb_copy(skb1, GFP_ATOMIC); 2162 + else 2163 + skb2 = skb_copy_expand(skb1, 2164 + skb_headroom(skb1), 2165 + ntail, 2166 + GFP_ATOMIC); 2167 + if (unlikely(skb2 == NULL)) 2168 + return -ENOMEM; 2169 + 2170 + if (skb1->sk) 2171 + skb_set_owner_w(skb2, skb1->sk); 2172 + 2173 + /* Looking around. Are we still alive? 
2174 + * OK, link new skb, drop old one */ 2175 + 2176 + skb2->next = skb1->next; 2177 + *skb_p = skb2; 2178 + kfree_skb(skb1); 2179 + skb1 = skb2; 2180 + } 2181 + elt++; 2182 + *trailer = skb1; 2183 + skb_p = &skb1->next; 2184 + } 2185 + 2186 + return elt; 2187 + } 2188 + 2006 2189 EXPORT_SYMBOL(___pskb_trim); 2007 2190 EXPORT_SYMBOL(__kfree_skb); 2008 2191 EXPORT_SYMBOL(kfree_skb); ··· 2221 2036 EXPORT_SYMBOL(skb_abort_seq_read); 2222 2037 EXPORT_SYMBOL(skb_find_text); 2223 2038 EXPORT_SYMBOL(skb_append_datato_frags); 2039 + 2040 + EXPORT_SYMBOL_GPL(skb_to_sgvec); 2041 + EXPORT_SYMBOL_GPL(skb_cow_data);
-169
net/xfrm/xfrm_algo.c
··· 612 612 613 613 #if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE) 614 614 615 - /* Looking generic it is not used in another places. */ 616 - 617 - int 618 - skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) 619 - { 620 - int start = skb_headlen(skb); 621 - int i, copy = start - offset; 622 - int elt = 0; 623 - 624 - if (copy > 0) { 625 - if (copy > len) 626 - copy = len; 627 - sg[elt].page = virt_to_page(skb->data + offset); 628 - sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE; 629 - sg[elt].length = copy; 630 - elt++; 631 - if ((len -= copy) == 0) 632 - return elt; 633 - offset += copy; 634 - } 635 - 636 - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 637 - int end; 638 - 639 - BUG_TRAP(start <= offset + len); 640 - 641 - end = start + skb_shinfo(skb)->frags[i].size; 642 - if ((copy = end - offset) > 0) { 643 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 644 - 645 - if (copy > len) 646 - copy = len; 647 - sg[elt].page = frag->page; 648 - sg[elt].offset = frag->page_offset+offset-start; 649 - sg[elt].length = copy; 650 - elt++; 651 - if (!(len -= copy)) 652 - return elt; 653 - offset += copy; 654 - } 655 - start = end; 656 - } 657 - 658 - if (skb_shinfo(skb)->frag_list) { 659 - struct sk_buff *list = skb_shinfo(skb)->frag_list; 660 - 661 - for (; list; list = list->next) { 662 - int end; 663 - 664 - BUG_TRAP(start <= offset + len); 665 - 666 - end = start + list->len; 667 - if ((copy = end - offset) > 0) { 668 - if (copy > len) 669 - copy = len; 670 - elt += skb_to_sgvec(list, sg+elt, offset - start, copy); 671 - if ((len -= copy) == 0) 672 - return elt; 673 - offset += copy; 674 - } 675 - start = end; 676 - } 677 - } 678 - BUG_ON(len); 679 - return elt; 680 - } 681 - EXPORT_SYMBOL_GPL(skb_to_sgvec); 682 - 683 - /* Check that skb data bits are writable. If they are not, copy data 684 - * to newly created private area. 
If "tailbits" is given, make sure that 685 - * tailbits bytes beyond current end of skb are writable. 686 - * 687 - * Returns amount of elements of scatterlist to load for subsequent 688 - * transformations and pointer to writable trailer skb. 689 - */ 690 - 691 - int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) 692 - { 693 - int copyflag; 694 - int elt; 695 - struct sk_buff *skb1, **skb_p; 696 - 697 - /* If skb is cloned or its head is paged, reallocate 698 - * head pulling out all the pages (pages are considered not writable 699 - * at the moment even if they are anonymous). 700 - */ 701 - if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && 702 - __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) 703 - return -ENOMEM; 704 - 705 - /* Easy case. Most of packets will go this way. */ 706 - if (!skb_shinfo(skb)->frag_list) { 707 - /* A little of trouble, not enough of space for trailer. 708 - * This should not happen, when stack is tuned to generate 709 - * good frames. OK, on miss we reallocate and reserve even more 710 - * space, 128 bytes is fair. */ 711 - 712 - if (skb_tailroom(skb) < tailbits && 713 - pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) 714 - return -ENOMEM; 715 - 716 - /* Voila! */ 717 - *trailer = skb; 718 - return 1; 719 - } 720 - 721 - /* Misery. We are in troubles, going to mincer fragments... */ 722 - 723 - elt = 1; 724 - skb_p = &skb_shinfo(skb)->frag_list; 725 - copyflag = 0; 726 - 727 - while ((skb1 = *skb_p) != NULL) { 728 - int ntail = 0; 729 - 730 - /* The fragment is partially pulled by someone, 731 - * this can happen on input. Copy it and everything 732 - * after it. */ 733 - 734 - if (skb_shared(skb1)) 735 - copyflag = 1; 736 - 737 - /* If the skb is the last, worry about trailer. 
*/ 738 - 739 - if (skb1->next == NULL && tailbits) { 740 - if (skb_shinfo(skb1)->nr_frags || 741 - skb_shinfo(skb1)->frag_list || 742 - skb_tailroom(skb1) < tailbits) 743 - ntail = tailbits + 128; 744 - } 745 - 746 - if (copyflag || 747 - skb_cloned(skb1) || 748 - ntail || 749 - skb_shinfo(skb1)->nr_frags || 750 - skb_shinfo(skb1)->frag_list) { 751 - struct sk_buff *skb2; 752 - 753 - /* Fuck, we are miserable poor guys... */ 754 - if (ntail == 0) 755 - skb2 = skb_copy(skb1, GFP_ATOMIC); 756 - else 757 - skb2 = skb_copy_expand(skb1, 758 - skb_headroom(skb1), 759 - ntail, 760 - GFP_ATOMIC); 761 - if (unlikely(skb2 == NULL)) 762 - return -ENOMEM; 763 - 764 - if (skb1->sk) 765 - skb_set_owner_w(skb2, skb1->sk); 766 - 767 - /* Looking around. Are we still alive? 768 - * OK, link new skb, drop old one */ 769 - 770 - skb2->next = skb1->next; 771 - *skb_p = skb2; 772 - kfree_skb(skb1); 773 - skb1 = skb2; 774 - } 775 - elt++; 776 - *trailer = skb1; 777 - skb_p = &skb1->next; 778 - } 779 - 780 - return elt; 781 - } 782 - EXPORT_SYMBOL_GPL(skb_cow_data); 783 - 784 615 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) 785 616 { 786 617 if (tail != skb) {