/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 * skb_over_panic - private function
 * @skb: buffer
 * @sz: size
 * @here: address
 *
 * Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 * skb_under_panic - private function
 * @skb: buffer
 * @sz: size
 * @here: address
 *
 * Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%#lx end:%#lx dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data,
	       (unsigned long)skb->tail, (unsigned long)skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

void skb_truesize_bug(struct sk_buff *skb)
{
	printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
	       "len=%u, sizeof(sk_buff)=%Zd\n",
	       skb->truesize, skb->len, sizeof(struct sk_buff));
}
EXPORT_SYMBOL(skb_truesize_bug);

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @fclone: allocate from fclone cache instead of head cache
 *	and allocate a cloned (child) skb
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of @size bytes. The object has a reference count of one.
 * The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int fclone, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;

	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	size = SKB_DATA_ALIGN(size);
	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
			gfp_mask, node);
	if (!data)
		goto nodata;

	/*
	 * See comment in sk_buff definition, just before the 'tail' member
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = size + sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	atomic_set(&shinfo->dataref, 1);
	shinfo->nr_frags = 0;
	shinfo->gso_size = 0;
	shinfo->gso_segs = 0;
	shinfo->gso_type = 0;
	shinfo->ip6_frag_id = 0;
	shinfo->frag_list = NULL;

	if (fclone) {
		struct sk_buff *child = skb + 1;
		atomic_t *fclone_ref = (atomic_t *) (child + 1);

		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(fclone_ref, 1);

		child->fclone = SKB_FCLONE_UNAVAILABLE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
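
/*
 * Example (editor's sketch, not part of the original file): callers
 * normally go through the alloc_skb() wrapper, reserve headroom for the
 * protocol headers they will push later, then append payload. The sizes
 * and the "payload" buffer below are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(header_len + data_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, header_len);	(headroom so skb_push() cannot underrun)
 *	memcpy(skb_put(skb, data_len), payload, data_len);
 */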

/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @length: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has unspecified headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask)
{
	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
	struct sk_buff *skb;

	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
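
/*
 * Example (editor's sketch): a driver receive path would typically use
 * the netdev_alloc_skb() wrapper, which passes %GFP_ATOMIC here. The
 * frame length and the "rx_ring_buf" source buffer are assumptions.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, frame_len + 2);
 *	if (!skb)
 *		return;				(drop the frame)
 *	skb_reserve(skb, 2);			(align the IP header)
 *	memcpy(skb_put(skb, frame_len), rx_ring_buf, frame_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */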

static void skb_drop_list(struct sk_buff **listp)
{
	struct sk_buff *list = *listp;

	*listp = NULL;

	do {
		struct sk_buff *this = list;
		list = list->next;
		kfree_skb(this);
	} while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
	if (!skb->cloned ||
	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			       &skb_shinfo(skb)->dataref)) {
		if (skb_shinfo(skb)->nr_frags) {
			int i;
			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
				put_page(skb_shinfo(skb)->frags[i].page);
		}

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);

		kfree(skb->head);
	}
}

/*
 *	Free the memory of an skbuff without cleaning its state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff *other;
	atomic_t *fclone_ref;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclone_ref = (atomic_t *) (skb + 2);
		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, skb);
		break;

	case SKB_FCLONE_CLONE:
		fclone_ref = (atomic_t *) (skb + 1);
		other = skb - 1;

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_UNAVAILABLE;

		if (atomic_dec_and_test(fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, other);
		break;
	}
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	dst_release(skb->dst);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
	skb_release_data(skb);
}

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}

/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	__kfree_skb(skb);
}
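
/*
 * Example (editor's sketch): kfree_skb() pairs with the reference taken
 * by skb_get(). A caller that keeps a private reference across some
 * deferred work (hypothetical here) would do:
 *
 *	skb_get(skb);			(users: 1 -> 2)
 *	...hand skb to another context...
 *	kfree_skb(skb);			(users: 2 -> 1, buffer survives)
 *
 * Whichever kfree_skb() call finally drops the count to zero ends up in
 * __kfree_skb() and releases the buffer.
 */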

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	new->dev = old->dev;
	new->transport_header = old->transport_header;
	new->network_header = old->network_header;
	new->mac_header = old->mac_header;
	new->dst = dst_clone(old->dst);
#ifdef CONFIG_INET
	new->sp = secpath_get(old->sp);
#endif
	memcpy(new->cb, old->cb, sizeof(old->cb));
	new->csum_start = old->csum_start;
	new->csum_offset = old->csum_offset;
	new->local_df = old->local_df;
	new->pkt_type = old->pkt_type;
	new->ip_summed = old->ip_summed;
	skb_copy_queue_mapping(new, old);
	new->priority = old->priority;
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	new->ipvs_property = old->ipvs_property;
#endif
	new->protocol = old->protocol;
	new->mark = old->mark;
	__nf_copy(new, old);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	new->nf_trace = old->nf_trace;
#endif
#ifdef CONFIG_NET_SCHED
	new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
	new->tc_verd = old->tc_verd;
#endif
#endif
	skb_copy_secmark(new, old);
}

static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->cloned = 1;
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->nohdr = 0;
	n->destructor = NULL;
	C(truesize);
	atomic_set(&n->users, 1);
	C(head);
	C(data);
	C(tail);
	C(end);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * skb_morph - morph one skb into another
 * @dst: the skb to receive the contents
 * @src: the skb to supply the contents
 *
 * This is identical to skb_clone except that the target skb is
 * supplied by the user.
 *
 * The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 * skb_clone - duplicate an sk_buff
 * @skb: buffer to clone
 * @gfp_mask: allocation priority
 *
 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
 * copies share the same packet data but not structure. The new
 * buffer has a reference count of 1. If the allocation fails the
 * function returns %NULL otherwise the new buffer is returned.
 *
 * If this function is called from an interrupt @gfp_mask must be
 * %GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n;

	n = skb + 1;
	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
		atomic_t *fclone_ref = (atomic_t *) (n + 1);
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(fclone_ref);
	} else {
		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
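
/*
 * Example (editor's sketch): cloning is the cheap way to send one packet
 * down two paths when neither path writes to the payload. The
 * "deliver_copy" consumer below is hypothetical.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *	if (clone)
 *		deliver_copy(clone);	(shares data with skb, owns its own
 *					 sk_buff header and reference)
 *
 * Since the data area is shared, writing through either buffer first
 * requires a copy-on-write step such as skb_cow() or pskb_expand_head().
 */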

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/*
	 *	Shift between the two data areas in bytes
	 */
	unsigned long offset = new->data - old->data;
#endif

	__copy_skb_header(new, old);

#ifndef NET_SKBUFF_DATA_USES_OFFSET
	/* {transport,network,mac}_header are relative to skb->head */
	new->transport_header += offset;
	new->network_header += offset;
	new->mac_header += offset;
#endif
	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

/**
 * skb_copy - create private copy of an sk_buff
 * @skb: buffer to copy
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data. This is used when the
 * caller wishes to modify the data and needs a private copy of the
 * data to alter. Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * As a by-product this function converts a non-linear &sk_buff into a
 * linear one, so that the &sk_buff becomes completely private and the
 * caller is allowed to modify all the data of the returned buffer. This
 * means that this function is not recommended when only the header is
 * going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb->data - skb->head;
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
#endif
	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}


/**
 * pskb_copy - create copy of an sk_buff with private head.
 * @skb: buffer to copy
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and part of its data, located
 * in the header. Fragmented data remain shared. This is used when
 * the caller wishes to modify only the header of the &sk_buff and
 * needs a private copy of the header to alter. Returns %NULL on
 * failure or the pointer to the buffer on success.
 * The returned buffer has a reference count of 1.
 */

struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n = alloc_skb(skb->end, gfp_mask);
#else
	n = alloc_skb(skb->end - skb->head, gfp_mask);
#endif
	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, skb->data - skb->head);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			get_page(skb_shinfo(n)->frags[i].page);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_shinfo(skb)->frag_list) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
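
/*
 * Example (editor's sketch): choosing between the duplication flavours;
 * the intent annotations are illustrative only.
 *
 *	skb2 = skb_clone(skb, GFP_ATOMIC);	(read-only use: share data)
 *	skb2 = pskb_copy(skb, GFP_ATOMIC);	(rewrite headers only: private
 *						 linear head, shared frags)
 *	skb2 = skb_copy(skb, GFP_ATOMIC);	(rewrite payload too: fully
 *						 private, linearized copy)
 */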

/**
 * pskb_expand_head - reallocate header of &sk_buff
 * @skb: buffer to reallocate
 * @nhead: room to add at head
 * @ntail: room to add at tail
 * @gfp_mask: allocation priority
 *
 * Expands (or creates an identical copy, if @nhead and @ntail are zero)
 * the header of the skb. The &sk_buff itself is not changed. The
 * &sk_buff MUST have a reference count of 1. Returns zero on success
 * or a negative error code if expansion failed; in the error case the
 * &sk_buff is not changed.
 *
 * All the pointers pointing into the skb header may change and must be
 * reloaded after a call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	int size = nhead + skb->end + ntail;
#else
	int size = nhead + (skb->end - skb->head) + ntail;
#endif
	long off;

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
	if (!data)
		goto nodata;

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	memcpy(data + nhead, skb->head, skb->tail);
#else
	memcpy(data + nhead, skb->head, skb->tail - skb->head);
#endif
	memcpy(data + size, skb_end_pointer(skb),
	       sizeof(struct skb_shared_info));

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		get_page(skb_shinfo(skb)->frags[i].page);

	if (skb_shinfo(skb)->frag_list)
		skb_clone_fraglist(skb);

	skb_release_data(skb);

	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->tail += off;
	skb->transport_header += off;
	skb->network_header += off;
	skb->mac_header += off;
	skb->csum_start += nhead;
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nodata:
	return -ENOMEM;
}
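
/*
 * Example (editor's sketch): growing headroom in place before pushing an
 * extra header; the 16-byte requirement is an arbitrary assumption.
 *
 *	if (skb_headroom(skb) < 16 &&
 *	    pskb_expand_head(skb, 16 - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 *
 * Any cached pointers into skb->data are stale after this and must be
 * re-derived, e.g. via ip_hdr(skb).
 */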

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}


/**
 * skb_copy_expand - copy and expand sk_buff
 * @skb: buffer to copy
 * @newheadroom: new free bytes at head
 * @newtailroom: new free bytes at tail
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data and while doing so
 * allocate additional space.
 *
 * This is used when the caller wishes to modify the data and needs a
 * private copy of the data to alter as well as more space for new fields.
 * Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * You must pass %GFP_ATOMIC as the allocation priority if this function
 * is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
				      gfp_mask);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;
	int off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	off = newheadroom - oldheadroom;
	n->csum_start += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	n->transport_header += off;
	n->network_header += off;
	n->mac_header += off;
#endif

	return n;
}
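
/*
 * Example (editor's sketch): a tunnel-style sender that needs both a
 * private copy and room for a new outer header; "outer_hlen" is a
 * hypothetical size.
 *
 *	struct sk_buff *nskb =
 *		skb_copy_expand(skb, outer_hlen, 0, GFP_ATOMIC);
 *	if (!nskb)
 *		goto drop;
 *	kfree_skb(skb);			(done with the original)
 *	skb_push(nskb, outer_hlen);	(now guaranteed to fit)
 */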

/**
 * skb_pad - zero pad the tail of an skb
 * @skb: buffer to pad
 * @pad: space to pad
 *
 * Ensure that a buffer is followed by a padding area that is zero
 * filled. Used by network drivers which may DMA or transfer data
 * beyond the buffer end onto the wire.
 *
 * May return error in out of memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero... */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
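
/*
 * Example (editor's sketch): Ethernet drivers commonly pad runt frames
 * to the 60-byte minimum on transmit via the skb_padto() helper from
 * <linux/skbuff.h>, which calls skb_pad() when bytes need to be added:
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;	(skb was already freed on error)
 */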

/* Trims skb to length len. It can change skb pointers. */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_shinfo(skb)->frags[i].size;

		if (end < len) {
			offset = end;
			continue;
		}

		skb_shinfo(skb)->frags[i++].size = len - offset;

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			put_page(skb_shinfo(skb)->frags[i].page);

		if (skb_shinfo(skb)->frag_list)
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			kfree_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
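
/*
 * Example (editor's sketch): callers normally go through pskb_trim() (or
 * skb_trim() for linear buffers), which only falls into this slow path
 * when the skb is actually non-linear. Stripping, say, a 4-byte hardware
 * trailer could look like:
 *
 *	if (pskb_trim(skb, skb->len - 4))
 *		goto drop;
 */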

/**
 * __pskb_pull_tail - advance tail of skb header
 * @skb: buffer to reallocate
 * @delta: number of bytes to advance tail
 *
 * The function only makes sense on a fragmented &sk_buff; it expands
 * the header, moving its tail forward and copying the necessary data
 * from the fragmented part.
 *
 * The &sk_buff MUST have a reference count of 1.
 *
 * Returns %NULL (and the &sk_buff does not change) if the pull failed,
 * or the value of the new tail of the skb in the case of success.
 *
 * All the pointers pointing into the skb header may change and must be
 * reloaded after a call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb does not have enough free space at the tail, get a
	 * new one plus 128 bytes for future expansions. If we do have
	 * enough room at the tail, reallocate without expansion only if
	 * the skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reason to pre-estimate the
	 * size of pulled pages. Superb.
	 */
	if (!skb_shinfo(skb)->frag_list)
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size >= eat)
			goto pull_pages;
		eat -= skb_shinfo(skb)->frags[i].size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but, taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					if (clone)
						kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
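
/*
 * Example (editor's sketch): this function is normally reached through
 * pskb_may_pull(), which protocol code uses to make sure the first N
 * bytes are linear before dereferencing a header:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);	(safe: the header is now in the linear area)
 */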

/* Copy some data bits from skb to kernel buffer. */

int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
			memcpy(to,
			       vaddr + skb_shinfo(skb)->frags[i].page_offset +
			       offset - start, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_bits(list, offset - start,
						  to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
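
/*
 * Example (editor's sketch): extracting a fixed-size header into a stack
 * buffer regardless of how the skb is fragmented; the UDP header choice
 * is illustrative.
 *
 *	struct udphdr uh;
 *
 *	if (skb_copy_bits(skb, skb_transport_offset(skb), &uh, sizeof(uh)))
 *		goto drop;		(range runs past skb->len)
 */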

/**
 * skb_store_bits - store bits from kernel buffer to skb
 * @skb: destination buffer
 * @offset: offset in destination
 * @from: source buffer
 * @len: number of bytes to copy
 *
 * Copy the specified number of bytes from the source buffer to the
 * destination skb. This function handles all the messy bits of
 * traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int i, copy;
	int start = skb_headlen(skb);

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + frag->size;
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_skb_frag(frag);
			memcpy(vaddr + frag->page_offset + offset - start,
			       from, copy);
			kunmap_skb_frag(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_store_bits(list, offset - start,
						   from, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				from += copy;
			}
			start = end;
		}
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

EXPORT_SYMBOL(skb_store_bits);

/* Checksum skb data. */

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial(skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial(vaddr + frag->page_offset +
					     offset - start, copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				__wsum csum2;
				if (copy > len)
					copy = len;
				csum2 = skb_checksum(list, offset - start,
						     copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				pos += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);

	return csum;
}
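
/*
 * Example (editor's sketch): checksumming a possibly non-linear skb's
 * transport payload and folding the result; purely illustrative.
 *
 *	int off = skb_transport_offset(skb);
 *	__wsum csum = skb_checksum(skb, off, skb->len - off, 0);
 *	__sum16 folded = csum_fold(csum);
 */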

/* Both of the above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len, __wsum csum)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			u8 *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			vaddr = kmap_skb_frag(frag);
			csum2 = csum_partial_copy_nocheck(vaddr +
							  frag->page_offset +
							  offset - start, to,
							  copy, 0);
			kunmap_skb_frag(vaddr);
			csum = csum_block_add(csum, csum2, pos);
			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			__wsum csum2;
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				csum2 = skb_copy_and_csum_bits(list,
							       offset - start,
							       to, copy, 0);
				csum = csum_block_add(csum, csum2, pos);
				if ((len -= copy) == 0)
					return csum;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return csum;
}

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb->csum_start - skb_headroom(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}

/**
 * skb_dequeue - remove from the head of the queue
 * @list: list to dequeue from
 *
 * Remove the head of the list. The list lock is taken so the function
 * may be used safely with other locking list functions. The head item is
 * returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 * skb_dequeue_tail - remove from the tail of the queue
 * @list: list to dequeue from
 *
 * Remove the tail of the list. The list lock is taken so the function
 * may be used safely with other locking list functions. The tail item is
 * returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}

/**
 * skb_queue_purge - empty a list
 * @list: list to empty
 *
 * Delete all buffers on an &sk_buff list. Each buffer is removed from
 * the list and one reference dropped. This function takes the list
 * lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 * skb_queue_head - queue a buffer at the list head
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the start of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 * skb_queue_tail - queue a buffer at the list tail
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the tail of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 * skb_unlink - remove a buffer from a list
 * @skb: buffer to remove
 * @list: list to use
 *
 * Remove a packet from a list. The list locks are taken and this
 * function is atomic with respect to other list locked calls.
 *
 * You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 * skb_append - append a buffer
 * @old: buffer to insert after
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet after a given packet in a list. The list locks are taken
 * and this function is atomic with respect to other list locked calls.
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_append(old, newsk, list);
	spin_unlock_irqrestore(&list->lock, flags);
}


/**
 * skb_insert - insert a buffer
 * @old: buffer to insert before
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet before a given packet in a list. The list locks are
 * taken and this function is atomic with respect to other list locked
 * calls.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
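
/*
 * Example (editor's sketch): a classic producer/consumer built on these
 * locked primitives; "rxq" is a hypothetical queue initialized once with
 * skb_queue_head_init(), and process() is a stand-in consumer.
 *
 *	skb_queue_tail(&rxq, skb);		(producer, any context)
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		process(skb);			(consumer)
 *
 * Both sides rely on the internal spin_lock_irqsave(), so no extra
 * locking is needed around the individual operations.
 */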

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. E.g.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. This is what we
				 *    do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size = len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note: The size of each block of data returned can be arbitrary;
 *       this limitation is the cost of zero-copy sequential
 *       reads of potentially non-linear data.
 *
 * Note: Fragment lists within fragments are not implemented
 *       at the moment; state->root_skb could be replaced with
 *       a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb);

	if (abs_offset < block_limit) {
		*data = st->cur_skb->data + abs_offset;
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->frag_data) {
		kunmap_skb_frag(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->root_skb == st->cur_skb &&
		   skb_shinfo(st->root_skb)->frag_list) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		goto next_skb;
	}

	return 0;
}

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
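
/*
 * Example (editor's sketch): walking an skb's data zero-copy. What is
 * done with each block is up to the caller.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, avail;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0)
 *		consumed += avail;	(process 'avail' bytes at 'data')
 *
 * No skb_abort_seq_read() is needed here because the loop ran
 * skb_seq_read() until it returned 0.
 */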

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}

/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data.
 * @getfrag: call back function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data to the fragment part
 * of the skb. If any page allocation fails, this procedure returns
 * -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			    int (*getfrag)(void *from, char *to, int offset,
					   int len, int odd, struct sk_buff *skb),
			    void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previously allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		sk->sk_sndmsg_page = page;
		sk->sk_sndmsg_off = 0;
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
			      frag->page_offset + frag->size),
			      offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		sk->sk_sndmsg_off += copy;
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}

EXPORT_SYMBOL_GPL(skb_pull_rcsum);
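
/*
 * Example (editor's sketch): searching an skb for a byte pattern with
 * the textsearch API; the "needle" pattern is an assumption.
 *
 *	struct ts_config *conf = textsearch_prepare("bm", "needle", 6,
 *						    GFP_KERNEL, TS_AUTOLOAD);
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		(pos == UINT_MAX means no match)
 *		textsearch_destroy(conf);
 *	}
 */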

/**
 * skb_segment - Perform protocol segmentation on skb.
 * @skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function performs segmentation on the given skb. It returns
 * a pointer to the first resulting segment (the rest are chained via
 * ->next), or ERR_PTR() when an error is encountered.
 */
struct sk_buff *skb_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = features & NETIF_F_SG;
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int k;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC);
		if (unlikely(!nskb))
			goto err;

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		nskb->dev = skb->dev;
		skb_copy_queue_mapping(nskb, skb);
		nskb->priority = skb->priority;
		nskb->protocol = skb->protocol;
		nskb->dst = dst_clone(skb->dst);
		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
		nskb->pkt_type = skb->pkt_type;
		nskb->mac_len = skb->mac_len;

		skb_reserve(nskb, headroom);
		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
					  doffset);
		if (!sg) {
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;
		k = 0;

		nskb->ip_summed = CHECKSUM_PARTIAL;
		nskb->csum = skb->csum;
		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len) {
			BUG_ON(i >= nfrags);

			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			k++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				break;
			}

			frag++;
		}

		skb_shinfo(nskb)->nr_frags = k;
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}

EXPORT_SYMBOL_GPL(skb_segment);

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

/**
 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 * @skb: Socket buffer containing the buffers to be mapped
 * @sg: The scatter-gather list to map into
 * @offset: The offset into the buffer's contents to start mapping
 * @len: Length of buffer space to be mapped
 *
 * Fill the specified scatter-gather list with mappings/pointers into a
 * region of the buffer space attached to a socket buffer.
 */
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], frag->page, copy,
				    frag->page_offset + offset - start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += __skb_to_sgvec(list, sg + elt,
						      offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}

/**
 * skb_cow_data - Check that a socket buffer's data buffers are writable
 * @skb: The socket buffer to check.
 * @tailbits: Amount of trailing space to be added
 * @trailer: Returned pointer to the skb where the @tailbits space begins
 *
 * Make sure that the data buffers attached to a socket buffer are
 * writable. If they are not, private copies are made of the data buffers
 * and the socket buffer is set to use these instead.
 *
 * If @tailbits is given, make sure that there is space to write @tailbits
 * bytes of data beyond current end of socket buffer. @trailer will be
 * set to point to the skb in which this space begins.
 *
 * The number of scatterlist elements required to completely map the
 * COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* We are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}

EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(__alloc_skb);
EXPORT_SYMBOL(__netdev_alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_prepare_seq_read);
EXPORT_SYMBOL(skb_seq_read);
EXPORT_SYMBOL(skb_abort_seq_read);
EXPORT_SYMBOL(skb_find_text);
EXPORT_SYMBOL(skb_append_datato_frags);

EXPORT_SYMBOL_GPL(skb_to_sgvec);
EXPORT_SYMBOL_GPL(skb_cow_data);