Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
net/core/skbuff.c at v2.6.19-rc3
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <iiitac@pyr.swan.ac.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Version:	$Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/highmem.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

static kmem_cache_t *skbuff_head_cache __read_mostly;
static kmem_cache_t *skbuff_fclone_cache __read_mostly;

/*
 *	Keep out-of-line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always
 *	reliable.
 */

/**
 *	skb_over_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%p end:%p dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
	       skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

/**
 *	skb_under_panic	-	private function
 *	@skb: buffer
 *	@sz: size
 *	@here: address
 *
 *	Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
			  "data:%p tail:%p end:%p dev:%s\n",
	       here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
	       skb->dev ?
skb->dev->name : "<NULL>"); 111 BUG(); 112} 113 114void skb_truesize_bug(struct sk_buff *skb) 115{ 116 printk(KERN_ERR "SKB BUG: Invalid truesize (%u) " 117 "len=%u, sizeof(sk_buff)=%Zd\n", 118 skb->truesize, skb->len, sizeof(struct sk_buff)); 119} 120EXPORT_SYMBOL(skb_truesize_bug); 121 122/* Allocate a new skbuff. We do this ourselves so we can fill in a few 123 * 'private' fields and also do memory statistics to find all the 124 * [BEEP] leaks. 125 * 126 */ 127 128/** 129 * __alloc_skb - allocate a network buffer 130 * @size: size to allocate 131 * @gfp_mask: allocation mask 132 * @fclone: allocate from fclone cache instead of head cache 133 * and allocate a cloned (child) skb 134 * 135 * Allocate a new &sk_buff. The returned buffer has no headroom and a 136 * tail room of size bytes. The object has a reference count of one. 137 * The return is the buffer. On a failure the return is %NULL. 138 * 139 * Buffers may only be allocated from interrupts using a @gfp_mask of 140 * %GFP_ATOMIC. 141 */ 142struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, 143 int fclone) 144{ 145 kmem_cache_t *cache; 146 struct skb_shared_info *shinfo; 147 struct sk_buff *skb; 148 u8 *data; 149 150 cache = fclone ? skbuff_fclone_cache : skbuff_head_cache; 151 152 /* Get the HEAD */ 153 skb = kmem_cache_alloc(cache, gfp_mask & ~__GFP_DMA); 154 if (!skb) 155 goto out; 156 157 /* Get the DATA. Size must match skb_add_mtu(). */ 158 size = SKB_DATA_ALIGN(size); 159 data = kmalloc_track_caller(size + sizeof(struct skb_shared_info), 160 gfp_mask); 161 if (!data) 162 goto nodata; 163 164 memset(skb, 0, offsetof(struct sk_buff, truesize)); 165 skb->truesize = size + sizeof(struct sk_buff); 166 atomic_set(&skb->users, 1); 167 skb->head = data; 168 skb->data = data; 169 skb->tail = data; 170 skb->end = data + size; 171 /* make sure we initialize shinfo sequentially */ 172 shinfo = skb_shinfo(skb); 173 atomic_set(&shinfo->dataref, 1); 174 shinfo->nr_frags = 0; 175 shinfo->gso_size = 0; 176 shinfo->gso_segs = 0; 177 shinfo->gso_type = 0; 178 shinfo->ip6_frag_id = 0; 179 shinfo->frag_list = NULL; 180 181 if (fclone) { 182 struct sk_buff *child = skb + 1; 183 atomic_t *fclone_ref = (atomic_t *) (child + 1); 184 185 skb->fclone = SKB_FCLONE_ORIG; 186 atomic_set(fclone_ref, 1); 187 188 child->fclone = SKB_FCLONE_UNAVAILABLE; 189 } 190out: 191 return skb; 192nodata: 193 kmem_cache_free(cache, skb); 194 skb = NULL; 195 goto out; 196} 197 198/** 199 * alloc_skb_from_cache - allocate a network buffer 200 * @cp: kmem_cache from which to allocate the data area 201 * (object size must be big enough for @size bytes + skb overheads) 202 * @size: size to allocate 203 * @gfp_mask: allocation mask 204 * 205 * Allocate a new &sk_buff. The returned buffer has no headroom and 206 * tail room of size bytes. The object has a reference count of one. 207 * The return is the buffer. On a failure the return is %NULL. 208 * 209 * Buffers may only be allocated from interrupts using a @gfp_mask of 210 * %GFP_ATOMIC. 211 */ 212struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, 213 unsigned int size, 214 gfp_t gfp_mask) 215{ 216 struct sk_buff *skb; 217 u8 *data; 218 219 /* Get the HEAD */ 220 skb = kmem_cache_alloc(skbuff_head_cache, 221 gfp_mask & ~__GFP_DMA); 222 if (!skb) 223 goto out; 224 225 /* Get the DATA. 
*/ 226 size = SKB_DATA_ALIGN(size); 227 data = kmem_cache_alloc(cp, gfp_mask); 228 if (!data) 229 goto nodata; 230 231 memset(skb, 0, offsetof(struct sk_buff, truesize)); 232 skb->truesize = size + sizeof(struct sk_buff); 233 atomic_set(&skb->users, 1); 234 skb->head = data; 235 skb->data = data; 236 skb->tail = data; 237 skb->end = data + size; 238 239 atomic_set(&(skb_shinfo(skb)->dataref), 1); 240 skb_shinfo(skb)->nr_frags = 0; 241 skb_shinfo(skb)->gso_size = 0; 242 skb_shinfo(skb)->gso_segs = 0; 243 skb_shinfo(skb)->gso_type = 0; 244 skb_shinfo(skb)->frag_list = NULL; 245out: 246 return skb; 247nodata: 248 kmem_cache_free(skbuff_head_cache, skb); 249 skb = NULL; 250 goto out; 251} 252 253/** 254 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device 255 * @dev: network device to receive on 256 * @length: length to allocate 257 * @gfp_mask: get_free_pages mask, passed to alloc_skb 258 * 259 * Allocate a new &sk_buff and assign it a usage count of one. The 260 * buffer has unspecified headroom built in. Users should allocate 261 * the headroom they think they need without accounting for the 262 * built in space. The built in space is used for optimisations. 263 * 264 * %NULL is returned if there is no free memory. 265 */ 266struct sk_buff *__netdev_alloc_skb(struct net_device *dev, 267 unsigned int length, gfp_t gfp_mask) 268{ 269 struct sk_buff *skb; 270 271 skb = alloc_skb(length + NET_SKB_PAD, gfp_mask); 272 if (likely(skb)) { 273 skb_reserve(skb, NET_SKB_PAD); 274 skb->dev = dev; 275 } 276 return skb; 277} 278 279static void skb_drop_list(struct sk_buff **listp) 280{ 281 struct sk_buff *list = *listp; 282 283 *listp = NULL; 284 285 do { 286 struct sk_buff *this = list; 287 list = list->next; 288 kfree_skb(this); 289 } while (list); 290} 291 292static inline void skb_drop_fraglist(struct sk_buff *skb) 293{ 294 skb_drop_list(&skb_shinfo(skb)->frag_list); 295} 296 297static void skb_clone_fraglist(struct sk_buff *skb) 298{ 299 struct sk_buff *list; 300 301 for (list = skb_shinfo(skb)->frag_list; list; list = list->next) 302 skb_get(list); 303} 304 305static void skb_release_data(struct sk_buff *skb) 306{ 307 if (!skb->cloned || 308 !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, 309 &skb_shinfo(skb)->dataref)) { 310 if (skb_shinfo(skb)->nr_frags) { 311 int i; 312 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 313 put_page(skb_shinfo(skb)->frags[i].page); 314 } 315 316 if (skb_shinfo(skb)->frag_list) 317 skb_drop_fraglist(skb); 318 319 kfree(skb->head); 320 } 321} 322 323/* 324 * Free an skbuff by memory without cleaning the state. 325 */ 326void kfree_skbmem(struct sk_buff *skb) 327{ 328 struct sk_buff *other; 329 atomic_t *fclone_ref; 330 331 skb_release_data(skb); 332 switch (skb->fclone) { 333 case SKB_FCLONE_UNAVAILABLE: 334 kmem_cache_free(skbuff_head_cache, skb); 335 break; 336 337 case SKB_FCLONE_ORIG: 338 fclone_ref = (atomic_t *) (skb + 2); 339 if (atomic_dec_and_test(fclone_ref)) 340 kmem_cache_free(skbuff_fclone_cache, skb); 341 break; 342 343 case SKB_FCLONE_CLONE: 344 fclone_ref = (atomic_t *) (skb + 1); 345 other = skb - 1; 346 347 /* The clone portion is available for 348 * fast-cloning again. 349 */ 350 skb->fclone = SKB_FCLONE_UNAVAILABLE; 351 352 if (atomic_dec_and_test(fclone_ref)) 353 kmem_cache_free(skbuff_fclone_cache, other); 354 break; 355 }; 356} 357 358/** 359 * __kfree_skb - private function 360 * @skb: buffer 361 * 362 * Free an sk_buff. Release anything attached to the buffer. 363 * Clean the state. 
This is an internal helper function. Users should 364 * always call kfree_skb 365 */ 366 367void __kfree_skb(struct sk_buff *skb) 368{ 369 dst_release(skb->dst); 370#ifdef CONFIG_XFRM 371 secpath_put(skb->sp); 372#endif 373 if (skb->destructor) { 374 WARN_ON(in_irq()); 375 skb->destructor(skb); 376 } 377#ifdef CONFIG_NETFILTER 378 nf_conntrack_put(skb->nfct); 379#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 380 nf_conntrack_put_reasm(skb->nfct_reasm); 381#endif 382#ifdef CONFIG_BRIDGE_NETFILTER 383 nf_bridge_put(skb->nf_bridge); 384#endif 385#endif 386/* XXX: IS this still necessary? - JHS */ 387#ifdef CONFIG_NET_SCHED 388 skb->tc_index = 0; 389#ifdef CONFIG_NET_CLS_ACT 390 skb->tc_verd = 0; 391#endif 392#endif 393 394 kfree_skbmem(skb); 395} 396 397/** 398 * kfree_skb - free an sk_buff 399 * @skb: buffer to free 400 * 401 * Drop a reference to the buffer and free it if the usage count has 402 * hit zero. 403 */ 404void kfree_skb(struct sk_buff *skb) 405{ 406 if (unlikely(!skb)) 407 return; 408 if (likely(atomic_read(&skb->users) == 1)) 409 smp_rmb(); 410 else if (likely(!atomic_dec_and_test(&skb->users))) 411 return; 412 __kfree_skb(skb); 413} 414 415/** 416 * skb_clone - duplicate an sk_buff 417 * @skb: buffer to clone 418 * @gfp_mask: allocation priority 419 * 420 * Duplicate an &sk_buff. The new one is not owned by a socket. Both 421 * copies share the same packet data but not structure. The new 422 * buffer has a reference count of 1. If the allocation fails the 423 * function returns %NULL otherwise the new buffer is returned. 424 * 425 * If this function is called from an interrupt gfp_mask() must be 426 * %GFP_ATOMIC. 427 */ 428 429struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 430{ 431 struct sk_buff *n; 432 433 n = skb + 1; 434 if (skb->fclone == SKB_FCLONE_ORIG && 435 n->fclone == SKB_FCLONE_UNAVAILABLE) { 436 atomic_t *fclone_ref = (atomic_t *) (n + 1); 437 n->fclone = SKB_FCLONE_CLONE; 438 atomic_inc(fclone_ref); 439 } else { 440 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 441 if (!n) 442 return NULL; 443 n->fclone = SKB_FCLONE_UNAVAILABLE; 444 } 445 446#define C(x) n->x = skb->x 447 448 n->next = n->prev = NULL; 449 n->sk = NULL; 450 C(tstamp); 451 C(dev); 452 C(h); 453 C(nh); 454 C(mac); 455 C(dst); 456 dst_clone(skb->dst); 457 C(sp); 458#ifdef CONFIG_INET 459 secpath_get(skb->sp); 460#endif 461 memcpy(n->cb, skb->cb, sizeof(skb->cb)); 462 C(len); 463 C(data_len); 464 C(csum); 465 C(local_df); 466 n->cloned = 1; 467 n->nohdr = 0; 468 C(pkt_type); 469 C(ip_summed); 470 C(priority); 471#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 472 C(ipvs_property); 473#endif 474 C(protocol); 475 n->destructor = NULL; 476#ifdef CONFIG_NETFILTER 477 C(nfmark); 478 C(nfct); 479 nf_conntrack_get(skb->nfct); 480 C(nfctinfo); 481#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 482 C(nfct_reasm); 483 nf_conntrack_get_reasm(skb->nfct_reasm); 484#endif 485#ifdef CONFIG_BRIDGE_NETFILTER 486 C(nf_bridge); 487 nf_bridge_get(skb->nf_bridge); 488#endif 489#endif /*CONFIG_NETFILTER*/ 490#ifdef CONFIG_NET_SCHED 491 C(tc_index); 492#ifdef CONFIG_NET_CLS_ACT 493 n->tc_verd = SET_TC_VERD(skb->tc_verd,0); 494 n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); 495 n->tc_verd = CLR_TC_MUNGED(n->tc_verd); 496 C(input_dev); 497#endif 498 skb_copy_secmark(n, skb); 499#endif 500 C(truesize); 501 atomic_set(&n->users, 1); 502 C(head); 503 C(data); 504 C(tail); 505 C(end); 506 507 atomic_inc(&(skb_shinfo(skb)->dataref)); 508 skb->cloned = 1; 
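	/*
	 * At this point skb and n share a single data buffer: the shared-info
	 * dataref has been bumped and both buffers are marked cloned, so
	 * skb_release_data() frees the data only with the last reference.
	 * A minimal, hedged usage sketch (the caller names below are
	 * hypothetical and not from this file):
	 *
	 *	struct sk_buff *dup = skb_clone(skb, GFP_ATOMIC);
	 *	if (!dup)
	 *		return;			// allocation failed
	 *	deliver_to_tap(dup);		// hypothetical second consumer
	 *	kfree_skb(dup);			// drops one data reference
	 */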
509 510 return n; 511} 512 513static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 514{ 515 /* 516 * Shift between the two data areas in bytes 517 */ 518 unsigned long offset = new->data - old->data; 519 520 new->sk = NULL; 521 new->dev = old->dev; 522 new->priority = old->priority; 523 new->protocol = old->protocol; 524 new->dst = dst_clone(old->dst); 525#ifdef CONFIG_INET 526 new->sp = secpath_get(old->sp); 527#endif 528 new->h.raw = old->h.raw + offset; 529 new->nh.raw = old->nh.raw + offset; 530 new->mac.raw = old->mac.raw + offset; 531 memcpy(new->cb, old->cb, sizeof(old->cb)); 532 new->local_df = old->local_df; 533 new->fclone = SKB_FCLONE_UNAVAILABLE; 534 new->pkt_type = old->pkt_type; 535 new->tstamp = old->tstamp; 536 new->destructor = NULL; 537#ifdef CONFIG_NETFILTER 538 new->nfmark = old->nfmark; 539 new->nfct = old->nfct; 540 nf_conntrack_get(old->nfct); 541 new->nfctinfo = old->nfctinfo; 542#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 543 new->nfct_reasm = old->nfct_reasm; 544 nf_conntrack_get_reasm(old->nfct_reasm); 545#endif 546#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 547 new->ipvs_property = old->ipvs_property; 548#endif 549#ifdef CONFIG_BRIDGE_NETFILTER 550 new->nf_bridge = old->nf_bridge; 551 nf_bridge_get(old->nf_bridge); 552#endif 553#endif 554#ifdef CONFIG_NET_SCHED 555#ifdef CONFIG_NET_CLS_ACT 556 new->tc_verd = old->tc_verd; 557#endif 558 new->tc_index = old->tc_index; 559#endif 560 skb_copy_secmark(new, old); 561 atomic_set(&new->users, 1); 562 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 563 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 564 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 565} 566 567/** 568 * skb_copy - create private copy of an sk_buff 569 * @skb: buffer to copy 570 * @gfp_mask: allocation priority 571 * 572 * Make a copy of both an &sk_buff and its data. This is used when the 573 * caller wishes to modify the data and needs a private copy of the 574 * data to alter. Returns %NULL on failure or the pointer to the buffer 575 * on success. The returned buffer has a reference count of 1. 576 * 577 * As by-product this function converts non-linear &sk_buff to linear 578 * one, so that &sk_buff becomes completely private and caller is allowed 579 * to modify all the data of returned buffer. This means that this 580 * function is not recommended for use in circumstances when only 581 * header is going to be modified. Use pskb_copy() instead. 582 */ 583 584struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 585{ 586 int headerlen = skb->data - skb->head; 587 /* 588 * Allocate the copy buffer 589 */ 590 struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len, 591 gfp_mask); 592 if (!n) 593 return NULL; 594 595 /* Set the data pointer */ 596 skb_reserve(n, headerlen); 597 /* Set the tail pointer and length */ 598 skb_put(n, skb->len); 599 n->csum = skb->csum; 600 n->ip_summed = skb->ip_summed; 601 602 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) 603 BUG(); 604 605 copy_skb_header(n, skb); 606 return n; 607} 608 609 610/** 611 * pskb_copy - create copy of an sk_buff with private head. 612 * @skb: buffer to copy 613 * @gfp_mask: allocation priority 614 * 615 * Make a copy of both an &sk_buff and part of its data, located 616 * in header. Fragmented data remain shared. This is used when 617 * the caller wishes to modify only header of &sk_buff and needs 618 * private copy of the header to alter. 
Returns %NULL on failure 619 * or the pointer to the buffer on success. 620 * The returned buffer has a reference count of 1. 621 */ 622 623struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) 624{ 625 /* 626 * Allocate the copy buffer 627 */ 628 struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask); 629 630 if (!n) 631 goto out; 632 633 /* Set the data pointer */ 634 skb_reserve(n, skb->data - skb->head); 635 /* Set the tail pointer and length */ 636 skb_put(n, skb_headlen(skb)); 637 /* Copy the bytes */ 638 memcpy(n->data, skb->data, n->len); 639 n->csum = skb->csum; 640 n->ip_summed = skb->ip_summed; 641 642 n->data_len = skb->data_len; 643 n->len = skb->len; 644 645 if (skb_shinfo(skb)->nr_frags) { 646 int i; 647 648 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 649 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 650 get_page(skb_shinfo(n)->frags[i].page); 651 } 652 skb_shinfo(n)->nr_frags = i; 653 } 654 655 if (skb_shinfo(skb)->frag_list) { 656 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 657 skb_clone_fraglist(n); 658 } 659 660 copy_skb_header(n, skb); 661out: 662 return n; 663} 664 665/** 666 * pskb_expand_head - reallocate header of &sk_buff 667 * @skb: buffer to reallocate 668 * @nhead: room to add at head 669 * @ntail: room to add at tail 670 * @gfp_mask: allocation priority 671 * 672 * Expands (or creates identical copy, if &nhead and &ntail are zero) 673 * header of skb. &sk_buff itself is not changed. &sk_buff MUST have 674 * reference count of 1. Returns zero in the case of success or error, 675 * if expansion failed. In the last case, &sk_buff is not changed. 676 * 677 * All the pointers pointing into skb header may change and must be 678 * reloaded after call to this function. 679 */ 680 681int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 682 gfp_t gfp_mask) 683{ 684 int i; 685 u8 *data; 686 int size = nhead + (skb->end - skb->head) + ntail; 687 long off; 688 689 if (skb_shared(skb)) 690 BUG(); 691 692 size = SKB_DATA_ALIGN(size); 693 694 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 695 if (!data) 696 goto nodata; 697 698 /* Copy only real data... and, alas, header. This should be 699 * optimized for the cases when header is void. 
*/ 700 memcpy(data + nhead, skb->head, skb->tail - skb->head); 701 memcpy(data + size, skb->end, sizeof(struct skb_shared_info)); 702 703 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 704 get_page(skb_shinfo(skb)->frags[i].page); 705 706 if (skb_shinfo(skb)->frag_list) 707 skb_clone_fraglist(skb); 708 709 skb_release_data(skb); 710 711 off = (data + nhead) - skb->head; 712 713 skb->head = data; 714 skb->end = data + size; 715 skb->data += off; 716 skb->tail += off; 717 skb->mac.raw += off; 718 skb->h.raw += off; 719 skb->nh.raw += off; 720 skb->cloned = 0; 721 skb->nohdr = 0; 722 atomic_set(&skb_shinfo(skb)->dataref, 1); 723 return 0; 724 725nodata: 726 return -ENOMEM; 727} 728 729/* Make private copy of skb with writable head and some headroom */ 730 731struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 732{ 733 struct sk_buff *skb2; 734 int delta = headroom - skb_headroom(skb); 735 736 if (delta <= 0) 737 skb2 = pskb_copy(skb, GFP_ATOMIC); 738 else { 739 skb2 = skb_clone(skb, GFP_ATOMIC); 740 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 741 GFP_ATOMIC)) { 742 kfree_skb(skb2); 743 skb2 = NULL; 744 } 745 } 746 return skb2; 747} 748 749 750/** 751 * skb_copy_expand - copy and expand sk_buff 752 * @skb: buffer to copy 753 * @newheadroom: new free bytes at head 754 * @newtailroom: new free bytes at tail 755 * @gfp_mask: allocation priority 756 * 757 * Make a copy of both an &sk_buff and its data and while doing so 758 * allocate additional space. 759 * 760 * This is used when the caller wishes to modify the data and needs a 761 * private copy of the data to alter as well as more space for new fields. 762 * Returns %NULL on failure or the pointer to the buffer 763 * on success. The returned buffer has a reference count of 1. 764 * 765 * You must pass %GFP_ATOMIC as the allocation priority if this function 766 * is called from an interrupt. 767 * 768 * BUG ALERT: ip_summed is not copied. Why does this work? Is it used 769 * only by netfilter in the cases when checksum is recalculated? --ANK 770 */ 771struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 772 int newheadroom, int newtailroom, 773 gfp_t gfp_mask) 774{ 775 /* 776 * Allocate the copy buffer 777 */ 778 struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom, 779 gfp_mask); 780 int head_copy_len, head_copy_off; 781 782 if (!n) 783 return NULL; 784 785 skb_reserve(n, newheadroom); 786 787 /* Set the tail pointer and length */ 788 skb_put(n, skb->len); 789 790 head_copy_len = skb_headroom(skb); 791 head_copy_off = 0; 792 if (newheadroom <= head_copy_len) 793 head_copy_len = newheadroom; 794 else 795 head_copy_off = newheadroom - head_copy_len; 796 797 /* Copy the linear header and data. */ 798 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 799 skb->len + head_copy_len)) 800 BUG(); 801 802 copy_skb_header(n, skb); 803 804 return n; 805} 806 807/** 808 * skb_pad - zero pad the tail of an skb 809 * @skb: buffer to pad 810 * @pad: space to pad 811 * 812 * Ensure that a buffer is followed by a padding area that is zero 813 * filled. Used by network drivers which may DMA or transfer data 814 * beyond the buffer end onto the wire. 815 * 816 * May return error in out of memory cases. The skb is freed on error. 817 */ 818 819int skb_pad(struct sk_buff *skb, int pad) 820{ 821 int err; 822 int ntail; 823 824 /* If the skbuff is non linear tailroom is always zero.. 
*/ 825 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 826 memset(skb->data+skb->len, 0, pad); 827 return 0; 828 } 829 830 ntail = skb->data_len + pad - (skb->end - skb->tail); 831 if (likely(skb_cloned(skb) || ntail > 0)) { 832 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 833 if (unlikely(err)) 834 goto free_skb; 835 } 836 837 /* FIXME: The use of this function with non-linear skb's really needs 838 * to be audited. 839 */ 840 err = skb_linearize(skb); 841 if (unlikely(err)) 842 goto free_skb; 843 844 memset(skb->data + skb->len, 0, pad); 845 return 0; 846 847free_skb: 848 kfree_skb(skb); 849 return err; 850} 851 852/* Trims skb to length len. It can change skb pointers. 853 */ 854 855int ___pskb_trim(struct sk_buff *skb, unsigned int len) 856{ 857 struct sk_buff **fragp; 858 struct sk_buff *frag; 859 int offset = skb_headlen(skb); 860 int nfrags = skb_shinfo(skb)->nr_frags; 861 int i; 862 int err; 863 864 if (skb_cloned(skb) && 865 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 866 return err; 867 868 i = 0; 869 if (offset >= len) 870 goto drop_pages; 871 872 for (; i < nfrags; i++) { 873 int end = offset + skb_shinfo(skb)->frags[i].size; 874 875 if (end < len) { 876 offset = end; 877 continue; 878 } 879 880 skb_shinfo(skb)->frags[i++].size = len - offset; 881 882drop_pages: 883 skb_shinfo(skb)->nr_frags = i; 884 885 for (; i < nfrags; i++) 886 put_page(skb_shinfo(skb)->frags[i].page); 887 888 if (skb_shinfo(skb)->frag_list) 889 skb_drop_fraglist(skb); 890 goto done; 891 } 892 893 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 894 fragp = &frag->next) { 895 int end = offset + frag->len; 896 897 if (skb_shared(frag)) { 898 struct sk_buff *nfrag; 899 900 nfrag = skb_clone(frag, GFP_ATOMIC); 901 if (unlikely(!nfrag)) 902 return -ENOMEM; 903 904 nfrag->next = frag->next; 905 kfree_skb(frag); 906 frag = nfrag; 907 *fragp = frag; 908 } 909 910 if (end < len) { 911 offset = end; 912 continue; 913 } 914 915 if (end > len && 916 unlikely((err = pskb_trim(frag, len - offset)))) 917 return err; 918 919 if (frag->next) 920 skb_drop_list(&frag->next); 921 break; 922 } 923 924done: 925 if (len > skb_headlen(skb)) { 926 skb->data_len -= skb->len - len; 927 skb->len = len; 928 } else { 929 skb->len = len; 930 skb->data_len = 0; 931 skb->tail = skb->data + len; 932 } 933 934 return 0; 935} 936 937/** 938 * __pskb_pull_tail - advance tail of skb header 939 * @skb: buffer to reallocate 940 * @delta: number of bytes to advance tail 941 * 942 * The function makes a sense only on a fragmented &sk_buff, 943 * it expands header moving its tail forward and copying necessary 944 * data from fragmented part. 945 * 946 * &sk_buff MUST have reference count of 1. 947 * 948 * Returns %NULL (and &sk_buff does not change) if pull failed 949 * or value of new tail of skb in the case of success. 950 * 951 * All the pointers pointing into skb header may change and must be 952 * reloaded after call to this function. 953 */ 954 955/* Moves tail of skb head forward, copying data from fragmented part, 956 * when it is necessary. 957 * 1. It may fail due to malloc failure. 958 * 2. It may change skb pointers. 959 * 960 * It is pretty complicated. Luckily, it is called only in exceptional cases. 961 */ 962unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) 963{ 964 /* If skb has not enough free space at tail, get new one 965 * plus 128 bytes for future expansions. If we have enough 966 * room at tail, reallocate without expansion only if skb is cloned. 
967 */ 968 int i, k, eat = (skb->tail + delta) - skb->end; 969 970 if (eat > 0 || skb_cloned(skb)) { 971 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 972 GFP_ATOMIC)) 973 return NULL; 974 } 975 976 if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta)) 977 BUG(); 978 979 /* Optimization: no fragments, no reasons to preestimate 980 * size of pulled pages. Superb. 981 */ 982 if (!skb_shinfo(skb)->frag_list) 983 goto pull_pages; 984 985 /* Estimate size of pulled pages. */ 986 eat = delta; 987 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 988 if (skb_shinfo(skb)->frags[i].size >= eat) 989 goto pull_pages; 990 eat -= skb_shinfo(skb)->frags[i].size; 991 } 992 993 /* If we need update frag list, we are in troubles. 994 * Certainly, it possible to add an offset to skb data, 995 * but taking into account that pulling is expected to 996 * be very rare operation, it is worth to fight against 997 * further bloating skb head and crucify ourselves here instead. 998 * Pure masohism, indeed. 8)8) 999 */ 1000 if (eat) { 1001 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1002 struct sk_buff *clone = NULL; 1003 struct sk_buff *insp = NULL; 1004 1005 do { 1006 BUG_ON(!list); 1007 1008 if (list->len <= eat) { 1009 /* Eaten as whole. */ 1010 eat -= list->len; 1011 list = list->next; 1012 insp = list; 1013 } else { 1014 /* Eaten partially. */ 1015 1016 if (skb_shared(list)) { 1017 /* Sucks! We need to fork list. :-( */ 1018 clone = skb_clone(list, GFP_ATOMIC); 1019 if (!clone) 1020 return NULL; 1021 insp = list->next; 1022 list = clone; 1023 } else { 1024 /* This may be pulled without 1025 * problems. */ 1026 insp = list; 1027 } 1028 if (!pskb_pull(list, eat)) { 1029 if (clone) 1030 kfree_skb(clone); 1031 return NULL; 1032 } 1033 break; 1034 } 1035 } while (eat); 1036 1037 /* Free pulled out fragments. */ 1038 while ((list = skb_shinfo(skb)->frag_list) != insp) { 1039 skb_shinfo(skb)->frag_list = list->next; 1040 kfree_skb(list); 1041 } 1042 /* And insert new clone at head. */ 1043 if (clone) { 1044 clone->next = list; 1045 skb_shinfo(skb)->frag_list = clone; 1046 } 1047 } 1048 /* Success! Now we may commit changes to skb data. */ 1049 1050pull_pages: 1051 eat = delta; 1052 k = 0; 1053 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1054 if (skb_shinfo(skb)->frags[i].size <= eat) { 1055 put_page(skb_shinfo(skb)->frags[i].page); 1056 eat -= skb_shinfo(skb)->frags[i].size; 1057 } else { 1058 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1059 if (eat) { 1060 skb_shinfo(skb)->frags[k].page_offset += eat; 1061 skb_shinfo(skb)->frags[k].size -= eat; 1062 eat = 0; 1063 } 1064 k++; 1065 } 1066 } 1067 skb_shinfo(skb)->nr_frags = k; 1068 1069 skb->tail += delta; 1070 skb->data_len -= delta; 1071 1072 return skb->tail; 1073} 1074 1075/* Copy some data bits from skb to kernel buffer. */ 1076 1077int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 1078{ 1079 int i, copy; 1080 int start = skb_headlen(skb); 1081 1082 if (offset > (int)skb->len - len) 1083 goto fault; 1084 1085 /* Copy header. 
*/ 1086 if ((copy = start - offset) > 0) { 1087 if (copy > len) 1088 copy = len; 1089 memcpy(to, skb->data + offset, copy); 1090 if ((len -= copy) == 0) 1091 return 0; 1092 offset += copy; 1093 to += copy; 1094 } 1095 1096 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1097 int end; 1098 1099 BUG_TRAP(start <= offset + len); 1100 1101 end = start + skb_shinfo(skb)->frags[i].size; 1102 if ((copy = end - offset) > 0) { 1103 u8 *vaddr; 1104 1105 if (copy > len) 1106 copy = len; 1107 1108 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 1109 memcpy(to, 1110 vaddr + skb_shinfo(skb)->frags[i].page_offset+ 1111 offset - start, copy); 1112 kunmap_skb_frag(vaddr); 1113 1114 if ((len -= copy) == 0) 1115 return 0; 1116 offset += copy; 1117 to += copy; 1118 } 1119 start = end; 1120 } 1121 1122 if (skb_shinfo(skb)->frag_list) { 1123 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1124 1125 for (; list; list = list->next) { 1126 int end; 1127 1128 BUG_TRAP(start <= offset + len); 1129 1130 end = start + list->len; 1131 if ((copy = end - offset) > 0) { 1132 if (copy > len) 1133 copy = len; 1134 if (skb_copy_bits(list, offset - start, 1135 to, copy)) 1136 goto fault; 1137 if ((len -= copy) == 0) 1138 return 0; 1139 offset += copy; 1140 to += copy; 1141 } 1142 start = end; 1143 } 1144 } 1145 if (!len) 1146 return 0; 1147 1148fault: 1149 return -EFAULT; 1150} 1151 1152/** 1153 * skb_store_bits - store bits from kernel buffer to skb 1154 * @skb: destination buffer 1155 * @offset: offset in destination 1156 * @from: source buffer 1157 * @len: number of bytes to copy 1158 * 1159 * Copy the specified number of bytes from the source buffer to the 1160 * destination skb. This function handles all the messy bits of 1161 * traversing fragment lists and such. 1162 */ 1163 1164int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len) 1165{ 1166 int i, copy; 1167 int start = skb_headlen(skb); 1168 1169 if (offset > (int)skb->len - len) 1170 goto fault; 1171 1172 if ((copy = start - offset) > 0) { 1173 if (copy > len) 1174 copy = len; 1175 memcpy(skb->data + offset, from, copy); 1176 if ((len -= copy) == 0) 1177 return 0; 1178 offset += copy; 1179 from += copy; 1180 } 1181 1182 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1183 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1184 int end; 1185 1186 BUG_TRAP(start <= offset + len); 1187 1188 end = start + frag->size; 1189 if ((copy = end - offset) > 0) { 1190 u8 *vaddr; 1191 1192 if (copy > len) 1193 copy = len; 1194 1195 vaddr = kmap_skb_frag(frag); 1196 memcpy(vaddr + frag->page_offset + offset - start, 1197 from, copy); 1198 kunmap_skb_frag(vaddr); 1199 1200 if ((len -= copy) == 0) 1201 return 0; 1202 offset += copy; 1203 from += copy; 1204 } 1205 start = end; 1206 } 1207 1208 if (skb_shinfo(skb)->frag_list) { 1209 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1210 1211 for (; list; list = list->next) { 1212 int end; 1213 1214 BUG_TRAP(start <= offset + len); 1215 1216 end = start + list->len; 1217 if ((copy = end - offset) > 0) { 1218 if (copy > len) 1219 copy = len; 1220 if (skb_store_bits(list, offset - start, 1221 from, copy)) 1222 goto fault; 1223 if ((len -= copy) == 0) 1224 return 0; 1225 offset += copy; 1226 from += copy; 1227 } 1228 start = end; 1229 } 1230 } 1231 if (!len) 1232 return 0; 1233 1234fault: 1235 return -EFAULT; 1236} 1237 1238EXPORT_SYMBOL(skb_store_bits); 1239 1240/* Checksum skb data. 
*/ 1241 1242unsigned int skb_checksum(const struct sk_buff *skb, int offset, 1243 int len, unsigned int csum) 1244{ 1245 int start = skb_headlen(skb); 1246 int i, copy = start - offset; 1247 int pos = 0; 1248 1249 /* Checksum header. */ 1250 if (copy > 0) { 1251 if (copy > len) 1252 copy = len; 1253 csum = csum_partial(skb->data + offset, copy, csum); 1254 if ((len -= copy) == 0) 1255 return csum; 1256 offset += copy; 1257 pos = copy; 1258 } 1259 1260 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1261 int end; 1262 1263 BUG_TRAP(start <= offset + len); 1264 1265 end = start + skb_shinfo(skb)->frags[i].size; 1266 if ((copy = end - offset) > 0) { 1267 unsigned int csum2; 1268 u8 *vaddr; 1269 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1270 1271 if (copy > len) 1272 copy = len; 1273 vaddr = kmap_skb_frag(frag); 1274 csum2 = csum_partial(vaddr + frag->page_offset + 1275 offset - start, copy, 0); 1276 kunmap_skb_frag(vaddr); 1277 csum = csum_block_add(csum, csum2, pos); 1278 if (!(len -= copy)) 1279 return csum; 1280 offset += copy; 1281 pos += copy; 1282 } 1283 start = end; 1284 } 1285 1286 if (skb_shinfo(skb)->frag_list) { 1287 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1288 1289 for (; list; list = list->next) { 1290 int end; 1291 1292 BUG_TRAP(start <= offset + len); 1293 1294 end = start + list->len; 1295 if ((copy = end - offset) > 0) { 1296 unsigned int csum2; 1297 if (copy > len) 1298 copy = len; 1299 csum2 = skb_checksum(list, offset - start, 1300 copy, 0); 1301 csum = csum_block_add(csum, csum2, pos); 1302 if ((len -= copy) == 0) 1303 return csum; 1304 offset += copy; 1305 pos += copy; 1306 } 1307 start = end; 1308 } 1309 } 1310 BUG_ON(len); 1311 1312 return csum; 1313} 1314 1315/* Both of above in one bottle. */ 1316 1317unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 1318 u8 *to, int len, unsigned int csum) 1319{ 1320 int start = skb_headlen(skb); 1321 int i, copy = start - offset; 1322 int pos = 0; 1323 1324 /* Copy header. 
*/ 1325 if (copy > 0) { 1326 if (copy > len) 1327 copy = len; 1328 csum = csum_partial_copy_nocheck(skb->data + offset, to, 1329 copy, csum); 1330 if ((len -= copy) == 0) 1331 return csum; 1332 offset += copy; 1333 to += copy; 1334 pos = copy; 1335 } 1336 1337 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1338 int end; 1339 1340 BUG_TRAP(start <= offset + len); 1341 1342 end = start + skb_shinfo(skb)->frags[i].size; 1343 if ((copy = end - offset) > 0) { 1344 unsigned int csum2; 1345 u8 *vaddr; 1346 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1347 1348 if (copy > len) 1349 copy = len; 1350 vaddr = kmap_skb_frag(frag); 1351 csum2 = csum_partial_copy_nocheck(vaddr + 1352 frag->page_offset + 1353 offset - start, to, 1354 copy, 0); 1355 kunmap_skb_frag(vaddr); 1356 csum = csum_block_add(csum, csum2, pos); 1357 if (!(len -= copy)) 1358 return csum; 1359 offset += copy; 1360 to += copy; 1361 pos += copy; 1362 } 1363 start = end; 1364 } 1365 1366 if (skb_shinfo(skb)->frag_list) { 1367 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1368 1369 for (; list; list = list->next) { 1370 unsigned int csum2; 1371 int end; 1372 1373 BUG_TRAP(start <= offset + len); 1374 1375 end = start + list->len; 1376 if ((copy = end - offset) > 0) { 1377 if (copy > len) 1378 copy = len; 1379 csum2 = skb_copy_and_csum_bits(list, 1380 offset - start, 1381 to, copy, 0); 1382 csum = csum_block_add(csum, csum2, pos); 1383 if ((len -= copy) == 0) 1384 return csum; 1385 offset += copy; 1386 to += copy; 1387 pos += copy; 1388 } 1389 start = end; 1390 } 1391 } 1392 BUG_ON(len); 1393 return csum; 1394} 1395 1396void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 1397{ 1398 unsigned int csum; 1399 long csstart; 1400 1401 if (skb->ip_summed == CHECKSUM_PARTIAL) 1402 csstart = skb->h.raw - skb->data; 1403 else 1404 csstart = skb_headlen(skb); 1405 1406 BUG_ON(csstart > skb_headlen(skb)); 1407 1408 memcpy(to, skb->data, csstart); 1409 1410 csum = 0; 1411 if (csstart != skb->len) 1412 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 1413 skb->len - csstart, 0); 1414 1415 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1416 long csstuff = csstart + skb->csum; 1417 1418 *((unsigned short *)(to + csstuff)) = csum_fold(csum); 1419 } 1420} 1421 1422/** 1423 * skb_dequeue - remove from the head of the queue 1424 * @list: list to dequeue from 1425 * 1426 * Remove the head of the list. The list lock is taken so the function 1427 * may be used safely with other locking list functions. The head item is 1428 * returned or %NULL if the list is empty. 1429 */ 1430 1431struct sk_buff *skb_dequeue(struct sk_buff_head *list) 1432{ 1433 unsigned long flags; 1434 struct sk_buff *result; 1435 1436 spin_lock_irqsave(&list->lock, flags); 1437 result = __skb_dequeue(list); 1438 spin_unlock_irqrestore(&list->lock, flags); 1439 return result; 1440} 1441 1442/** 1443 * skb_dequeue_tail - remove from the tail of the queue 1444 * @list: list to dequeue from 1445 * 1446 * Remove the tail of the list. The list lock is taken so the function 1447 * may be used safely with other locking list functions. The tail item is 1448 * returned or %NULL if the list is empty. 
1449 */ 1450struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 1451{ 1452 unsigned long flags; 1453 struct sk_buff *result; 1454 1455 spin_lock_irqsave(&list->lock, flags); 1456 result = __skb_dequeue_tail(list); 1457 spin_unlock_irqrestore(&list->lock, flags); 1458 return result; 1459} 1460 1461/** 1462 * skb_queue_purge - empty a list 1463 * @list: list to empty 1464 * 1465 * Delete all buffers on an &sk_buff list. Each buffer is removed from 1466 * the list and one reference dropped. This function takes the list 1467 * lock and is atomic with respect to other list locking functions. 1468 */ 1469void skb_queue_purge(struct sk_buff_head *list) 1470{ 1471 struct sk_buff *skb; 1472 while ((skb = skb_dequeue(list)) != NULL) 1473 kfree_skb(skb); 1474} 1475 1476/** 1477 * skb_queue_head - queue a buffer at the list head 1478 * @list: list to use 1479 * @newsk: buffer to queue 1480 * 1481 * Queue a buffer at the start of the list. This function takes the 1482 * list lock and can be used safely with other locking &sk_buff functions 1483 * safely. 1484 * 1485 * A buffer cannot be placed on two lists at the same time. 1486 */ 1487void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 1488{ 1489 unsigned long flags; 1490 1491 spin_lock_irqsave(&list->lock, flags); 1492 __skb_queue_head(list, newsk); 1493 spin_unlock_irqrestore(&list->lock, flags); 1494} 1495 1496/** 1497 * skb_queue_tail - queue a buffer at the list tail 1498 * @list: list to use 1499 * @newsk: buffer to queue 1500 * 1501 * Queue a buffer at the tail of the list. This function takes the 1502 * list lock and can be used safely with other locking &sk_buff functions 1503 * safely. 1504 * 1505 * A buffer cannot be placed on two lists at the same time. 1506 */ 1507void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 1508{ 1509 unsigned long flags; 1510 1511 spin_lock_irqsave(&list->lock, flags); 1512 __skb_queue_tail(list, newsk); 1513 spin_unlock_irqrestore(&list->lock, flags); 1514} 1515 1516/** 1517 * skb_unlink - remove a buffer from a list 1518 * @skb: buffer to remove 1519 * @list: list to use 1520 * 1521 * Remove a packet from a list. The list locks are taken and this 1522 * function is atomic with respect to other list locked calls 1523 * 1524 * You must know what list the SKB is on. 1525 */ 1526void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 1527{ 1528 unsigned long flags; 1529 1530 spin_lock_irqsave(&list->lock, flags); 1531 __skb_unlink(skb, list); 1532 spin_unlock_irqrestore(&list->lock, flags); 1533} 1534 1535/** 1536 * skb_append - append a buffer 1537 * @old: buffer to insert after 1538 * @newsk: buffer to insert 1539 * @list: list to use 1540 * 1541 * Place a packet after a given packet in a list. The list locks are taken 1542 * and this function is atomic with respect to other list locked calls. 1543 * A buffer cannot be placed on two lists at the same time. 1544 */ 1545void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 1546{ 1547 unsigned long flags; 1548 1549 spin_lock_irqsave(&list->lock, flags); 1550 __skb_append(old, newsk, list); 1551 spin_unlock_irqrestore(&list->lock, flags); 1552} 1553 1554 1555/** 1556 * skb_insert - insert a buffer 1557 * @old: buffer to insert before 1558 * @newsk: buffer to insert 1559 * @list: list to use 1560 * 1561 * Place a packet before a given packet in a list. The list locks are 1562 * taken and this function is atomic with respect to other list locked 1563 * calls. 
1564 * 1565 * A buffer cannot be placed on two lists at the same time. 1566 */ 1567void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 1568{ 1569 unsigned long flags; 1570 1571 spin_lock_irqsave(&list->lock, flags); 1572 __skb_insert(newsk, old->prev, old, list); 1573 spin_unlock_irqrestore(&list->lock, flags); 1574} 1575 1576#if 0 1577/* 1578 * Tune the memory allocator for a new MTU size. 1579 */ 1580void skb_add_mtu(int mtu) 1581{ 1582 /* Must match allocation in alloc_skb */ 1583 mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info); 1584 1585 kmem_add_cache_size(mtu); 1586} 1587#endif 1588 1589static inline void skb_split_inside_header(struct sk_buff *skb, 1590 struct sk_buff* skb1, 1591 const u32 len, const int pos) 1592{ 1593 int i; 1594 1595 memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len); 1596 1597 /* And move data appendix as is. */ 1598 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1599 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 1600 1601 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 1602 skb_shinfo(skb)->nr_frags = 0; 1603 skb1->data_len = skb->data_len; 1604 skb1->len += skb1->data_len; 1605 skb->data_len = 0; 1606 skb->len = len; 1607 skb->tail = skb->data + len; 1608} 1609 1610static inline void skb_split_no_header(struct sk_buff *skb, 1611 struct sk_buff* skb1, 1612 const u32 len, int pos) 1613{ 1614 int i, k = 0; 1615 const int nfrags = skb_shinfo(skb)->nr_frags; 1616 1617 skb_shinfo(skb)->nr_frags = 0; 1618 skb1->len = skb1->data_len = skb->len - len; 1619 skb->len = len; 1620 skb->data_len = len - pos; 1621 1622 for (i = 0; i < nfrags; i++) { 1623 int size = skb_shinfo(skb)->frags[i].size; 1624 1625 if (pos + size > len) { 1626 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 1627 1628 if (pos < len) { 1629 /* Split frag. 1630 * We have two variants in this case: 1631 * 1. Move all the frag to the second 1632 * part, if it is possible. F.e. 1633 * this approach is mandatory for TUX, 1634 * where splitting is expensive. 1635 * 2. Split is accurately. We make this. 1636 */ 1637 get_page(skb_shinfo(skb)->frags[i].page); 1638 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 1639 skb_shinfo(skb1)->frags[0].size -= len - pos; 1640 skb_shinfo(skb)->frags[i].size = len - pos; 1641 skb_shinfo(skb)->nr_frags++; 1642 } 1643 k++; 1644 } else 1645 skb_shinfo(skb)->nr_frags++; 1646 pos += size; 1647 } 1648 skb_shinfo(skb1)->nr_frags = k; 1649} 1650 1651/** 1652 * skb_split - Split fragmented skb to two parts at length len. 1653 * @skb: the buffer to split 1654 * @skb1: the buffer to receive the second part 1655 * @len: new length for skb 1656 */ 1657void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 1658{ 1659 int pos = skb_headlen(skb); 1660 1661 if (len < pos) /* Split line is inside header. */ 1662 skb_split_inside_header(skb, skb1, len, pos); 1663 else /* Second chunk has no header, nothing to copy. */ 1664 skb_split_no_header(skb, skb1, len, pos); 1665} 1666 1667/** 1668 * skb_prepare_seq_read - Prepare a sequential read of skb data 1669 * @skb: the buffer to read 1670 * @from: lower offset of data to be read 1671 * @to: upper offset of data to be read 1672 * @st: state variable 1673 * 1674 * Initializes the specified state variable. Must be called before 1675 * invoking skb_seq_read() for the first time. 
1676 */ 1677void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 1678 unsigned int to, struct skb_seq_state *st) 1679{ 1680 st->lower_offset = from; 1681 st->upper_offset = to; 1682 st->root_skb = st->cur_skb = skb; 1683 st->frag_idx = st->stepped_offset = 0; 1684 st->frag_data = NULL; 1685} 1686 1687/** 1688 * skb_seq_read - Sequentially read skb data 1689 * @consumed: number of bytes consumed by the caller so far 1690 * @data: destination pointer for data to be returned 1691 * @st: state variable 1692 * 1693 * Reads a block of skb data at &consumed relative to the 1694 * lower offset specified to skb_prepare_seq_read(). Assigns 1695 * the head of the data block to &data and returns the length 1696 * of the block or 0 if the end of the skb data or the upper 1697 * offset has been reached. 1698 * 1699 * The caller is not required to consume all of the data 1700 * returned, i.e. &consumed is typically set to the number 1701 * of bytes already consumed and the next call to 1702 * skb_seq_read() will return the remaining part of the block. 1703 * 1704 * Note: The size of each block of data returned can be arbitary, 1705 * this limitation is the cost for zerocopy seqeuental 1706 * reads of potentially non linear data. 1707 * 1708 * Note: Fragment lists within fragments are not implemented 1709 * at the moment, state->root_skb could be replaced with 1710 * a stack for this purpose. 1711 */ 1712unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 1713 struct skb_seq_state *st) 1714{ 1715 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 1716 skb_frag_t *frag; 1717 1718 if (unlikely(abs_offset >= st->upper_offset)) 1719 return 0; 1720 1721next_skb: 1722 block_limit = skb_headlen(st->cur_skb); 1723 1724 if (abs_offset < block_limit) { 1725 *data = st->cur_skb->data + abs_offset; 1726 return block_limit - abs_offset; 1727 } 1728 1729 if (st->frag_idx == 0 && !st->frag_data) 1730 st->stepped_offset += skb_headlen(st->cur_skb); 1731 1732 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 1733 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 1734 block_limit = frag->size + st->stepped_offset; 1735 1736 if (abs_offset < block_limit) { 1737 if (!st->frag_data) 1738 st->frag_data = kmap_skb_frag(frag); 1739 1740 *data = (u8 *) st->frag_data + frag->page_offset + 1741 (abs_offset - st->stepped_offset); 1742 1743 return block_limit - abs_offset; 1744 } 1745 1746 if (st->frag_data) { 1747 kunmap_skb_frag(st->frag_data); 1748 st->frag_data = NULL; 1749 } 1750 1751 st->frag_idx++; 1752 st->stepped_offset += frag->size; 1753 } 1754 1755 if (st->cur_skb->next) { 1756 st->cur_skb = st->cur_skb->next; 1757 st->frag_idx = 0; 1758 goto next_skb; 1759 } else if (st->root_skb == st->cur_skb && 1760 skb_shinfo(st->root_skb)->frag_list) { 1761 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 1762 goto next_skb; 1763 } 1764 1765 return 0; 1766} 1767 1768/** 1769 * skb_abort_seq_read - Abort a sequential read of skb data 1770 * @st: state variable 1771 * 1772 * Must be called if skb_seq_read() was not called until it 1773 * returned 0. 
1774 */ 1775void skb_abort_seq_read(struct skb_seq_state *st) 1776{ 1777 if (st->frag_data) 1778 kunmap_skb_frag(st->frag_data); 1779} 1780 1781#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 1782 1783static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 1784 struct ts_config *conf, 1785 struct ts_state *state) 1786{ 1787 return skb_seq_read(offset, text, TS_SKB_CB(state)); 1788} 1789 1790static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 1791{ 1792 skb_abort_seq_read(TS_SKB_CB(state)); 1793} 1794 1795/** 1796 * skb_find_text - Find a text pattern in skb data 1797 * @skb: the buffer to look in 1798 * @from: search offset 1799 * @to: search limit 1800 * @config: textsearch configuration 1801 * @state: uninitialized textsearch state variable 1802 * 1803 * Finds a pattern in the skb data according to the specified 1804 * textsearch configuration. Use textsearch_next() to retrieve 1805 * subsequent occurrences of the pattern. Returns the offset 1806 * to the first occurrence or UINT_MAX if no match was found. 1807 */ 1808unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 1809 unsigned int to, struct ts_config *config, 1810 struct ts_state *state) 1811{ 1812 unsigned int ret; 1813 1814 config->get_next_block = skb_ts_get_next_block; 1815 config->finish = skb_ts_finish; 1816 1817 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 1818 1819 ret = textsearch_find(config, state); 1820 return (ret <= to - from ? ret : UINT_MAX); 1821} 1822 1823/** 1824 * skb_append_datato_frags: - append the user data to a skb 1825 * @sk: sock structure 1826 * @skb: skb structure to be appened with user data. 1827 * @getfrag: call back function to be used for getting the user data 1828 * @from: pointer to user message iov 1829 * @length: length of the iov message 1830 * 1831 * Description: This procedure append the user data in the fragment part 1832 * of the skb if any page alloc fails user this procedure returns -ENOMEM 1833 */ 1834int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 1835 int (*getfrag)(void *from, char *to, int offset, 1836 int len, int odd, struct sk_buff *skb), 1837 void *from, int length) 1838{ 1839 int frg_cnt = 0; 1840 skb_frag_t *frag = NULL; 1841 struct page *page = NULL; 1842 int copy, left; 1843 int offset = 0; 1844 int ret; 1845 1846 do { 1847 /* Return error if we don't have space for new frag */ 1848 frg_cnt = skb_shinfo(skb)->nr_frags; 1849 if (frg_cnt >= MAX_SKB_FRAGS) 1850 return -EFAULT; 1851 1852 /* allocate a new page for next frag */ 1853 page = alloc_pages(sk->sk_allocation, 0); 1854 1855 /* If alloc_page fails just return failure and caller will 1856 * free previous allocated pages by doing kfree_skb() 1857 */ 1858 if (page == NULL) 1859 return -ENOMEM; 1860 1861 /* initialize the next frag */ 1862 sk->sk_sndmsg_page = page; 1863 sk->sk_sndmsg_off = 0; 1864 skb_fill_page_desc(skb, frg_cnt, page, 0, 0); 1865 skb->truesize += PAGE_SIZE; 1866 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); 1867 1868 /* get the new initialized frag */ 1869 frg_cnt = skb_shinfo(skb)->nr_frags; 1870 frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; 1871 1872 /* copy the user data to page */ 1873 left = PAGE_SIZE - frag->page_offset; 1874 copy = (length > left)? 
left : length; 1875 1876 ret = getfrag(from, (page_address(frag->page) + 1877 frag->page_offset + frag->size), 1878 offset, copy, 0, skb); 1879 if (ret < 0) 1880 return -EFAULT; 1881 1882 /* copy was successful so update the size parameters */ 1883 sk->sk_sndmsg_off += copy; 1884 frag->size += copy; 1885 skb->len += copy; 1886 skb->data_len += copy; 1887 offset += copy; 1888 length -= copy; 1889 1890 } while (length > 0); 1891 1892 return 0; 1893} 1894 1895/** 1896 * skb_pull_rcsum - pull skb and update receive checksum 1897 * @skb: buffer to update 1898 * @start: start of data before pull 1899 * @len: length of data pulled 1900 * 1901 * This function performs an skb_pull on the packet and updates 1902 * update the CHECKSUM_COMPLETE checksum. It should be used on 1903 * receive path processing instead of skb_pull unless you know 1904 * that the checksum difference is zero (e.g., a valid IP header) 1905 * or you are setting ip_summed to CHECKSUM_NONE. 1906 */ 1907unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 1908{ 1909 BUG_ON(len > skb->len); 1910 skb->len -= len; 1911 BUG_ON(skb->len < skb->data_len); 1912 skb_postpull_rcsum(skb, skb->data, len); 1913 return skb->data += len; 1914} 1915 1916EXPORT_SYMBOL_GPL(skb_pull_rcsum); 1917 1918/** 1919 * skb_segment - Perform protocol segmentation on skb. 1920 * @skb: buffer to segment 1921 * @features: features for the output path (see dev->features) 1922 * 1923 * This function performs segmentation on the given skb. It returns 1924 * the segment at the given position. It returns NULL if there are 1925 * no more segments to generate, or when an error is encountered. 1926 */ 1927struct sk_buff *skb_segment(struct sk_buff *skb, int features) 1928{ 1929 struct sk_buff *segs = NULL; 1930 struct sk_buff *tail = NULL; 1931 unsigned int mss = skb_shinfo(skb)->gso_size; 1932 unsigned int doffset = skb->data - skb->mac.raw; 1933 unsigned int offset = doffset; 1934 unsigned int headroom; 1935 unsigned int len; 1936 int sg = features & NETIF_F_SG; 1937 int nfrags = skb_shinfo(skb)->nr_frags; 1938 int err = -ENOMEM; 1939 int i = 0; 1940 int pos; 1941 1942 __skb_push(skb, doffset); 1943 headroom = skb_headroom(skb); 1944 pos = skb_headlen(skb); 1945 1946 do { 1947 struct sk_buff *nskb; 1948 skb_frag_t *frag; 1949 int hsize, nsize; 1950 int k; 1951 int size; 1952 1953 len = skb->len - offset; 1954 if (len > mss) 1955 len = mss; 1956 1957 hsize = skb_headlen(skb) - offset; 1958 if (hsize < 0) 1959 hsize = 0; 1960 nsize = hsize + doffset; 1961 if (nsize > len + doffset || !sg) 1962 nsize = len + doffset; 1963 1964 nskb = alloc_skb(nsize + headroom, GFP_ATOMIC); 1965 if (unlikely(!nskb)) 1966 goto err; 1967 1968 if (segs) 1969 tail->next = nskb; 1970 else 1971 segs = nskb; 1972 tail = nskb; 1973 1974 nskb->dev = skb->dev; 1975 nskb->priority = skb->priority; 1976 nskb->protocol = skb->protocol; 1977 nskb->dst = dst_clone(skb->dst); 1978 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 1979 nskb->pkt_type = skb->pkt_type; 1980 nskb->mac_len = skb->mac_len; 1981 1982 skb_reserve(nskb, headroom); 1983 nskb->mac.raw = nskb->data; 1984 nskb->nh.raw = nskb->data + skb->mac_len; 1985 nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw); 1986 memcpy(skb_put(nskb, doffset), skb->data, doffset); 1987 1988 if (!sg) { 1989 nskb->csum = skb_copy_and_csum_bits(skb, offset, 1990 skb_put(nskb, len), 1991 len, 0); 1992 continue; 1993 } 1994 1995 frag = skb_shinfo(nskb)->frags; 1996 k = 0; 1997 1998 nskb->ip_summed = CHECKSUM_PARTIAL; 1999 nskb->csum = 
skb->csum; 2000 memcpy(skb_put(nskb, hsize), skb->data + offset, hsize); 2001 2002 while (pos < offset + len) { 2003 BUG_ON(i >= nfrags); 2004 2005 *frag = skb_shinfo(skb)->frags[i]; 2006 get_page(frag->page); 2007 size = frag->size; 2008 2009 if (pos < offset) { 2010 frag->page_offset += offset - pos; 2011 frag->size -= offset - pos; 2012 } 2013 2014 k++; 2015 2016 if (pos + size <= offset + len) { 2017 i++; 2018 pos += size; 2019 } else { 2020 frag->size -= pos + size - (offset + len); 2021 break; 2022 } 2023 2024 frag++; 2025 } 2026 2027 skb_shinfo(nskb)->nr_frags = k; 2028 nskb->data_len = len - hsize; 2029 nskb->len += nskb->data_len; 2030 nskb->truesize += nskb->data_len; 2031 } while ((offset += len) < skb->len); 2032 2033 return segs; 2034 2035err: 2036 while ((skb = segs)) { 2037 segs = skb->next; 2038 kfree(skb); 2039 } 2040 return ERR_PTR(err); 2041} 2042 2043EXPORT_SYMBOL_GPL(skb_segment); 2044 2045void __init skb_init(void) 2046{ 2047 skbuff_head_cache = kmem_cache_create("skbuff_head_cache", 2048 sizeof(struct sk_buff), 2049 0, 2050 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2051 NULL, NULL); 2052 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache", 2053 (2*sizeof(struct sk_buff)) + 2054 sizeof(atomic_t), 2055 0, 2056 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2057 NULL, NULL); 2058} 2059 2060EXPORT_SYMBOL(___pskb_trim); 2061EXPORT_SYMBOL(__kfree_skb); 2062EXPORT_SYMBOL(kfree_skb); 2063EXPORT_SYMBOL(__pskb_pull_tail); 2064EXPORT_SYMBOL(__alloc_skb); 2065EXPORT_SYMBOL(__netdev_alloc_skb); 2066EXPORT_SYMBOL(pskb_copy); 2067EXPORT_SYMBOL(pskb_expand_head); 2068EXPORT_SYMBOL(skb_checksum); 2069EXPORT_SYMBOL(skb_clone); 2070EXPORT_SYMBOL(skb_clone_fraglist); 2071EXPORT_SYMBOL(skb_copy); 2072EXPORT_SYMBOL(skb_copy_and_csum_bits); 2073EXPORT_SYMBOL(skb_copy_and_csum_dev); 2074EXPORT_SYMBOL(skb_copy_bits); 2075EXPORT_SYMBOL(skb_copy_expand); 2076EXPORT_SYMBOL(skb_over_panic); 2077EXPORT_SYMBOL(skb_pad); 2078EXPORT_SYMBOL(skb_realloc_headroom); 2079EXPORT_SYMBOL(skb_under_panic); 2080EXPORT_SYMBOL(skb_dequeue); 2081EXPORT_SYMBOL(skb_dequeue_tail); 2082EXPORT_SYMBOL(skb_insert); 2083EXPORT_SYMBOL(skb_queue_purge); 2084EXPORT_SYMBOL(skb_queue_head); 2085EXPORT_SYMBOL(skb_queue_tail); 2086EXPORT_SYMBOL(skb_unlink); 2087EXPORT_SYMBOL(skb_append); 2088EXPORT_SYMBOL(skb_split); 2089EXPORT_SYMBOL(skb_prepare_seq_read); 2090EXPORT_SYMBOL(skb_seq_read); 2091EXPORT_SYMBOL(skb_abort_seq_read); 2092EXPORT_SYMBOL(skb_find_text); 2093EXPORT_SYMBOL(skb_append_datato_frags);
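
/*
 * Illustrative receive-path sketch, compiled out.  It shows roughly how the
 * allocation helpers above are used by a driver; my_dev_rx(), hw_buf and len
 * are hypothetical names, and eth_type_trans() would additionally need
 * <linux/etherdevice.h>.
 */
#if 0
static int my_dev_rx(struct net_device *dev, const void *hw_buf,
		     unsigned int len)
{
	struct sk_buff *skb;

	/* __netdev_alloc_skb() already reserves NET_SKB_PAD of headroom;
	 * reserve a little more so the IP header ends up aligned. */
	skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);
	if (!skb)
		return -ENOMEM;			/* drop the frame */
	skb_reserve(skb, NET_IP_ALIGN);

	/* skb_put() advances the tail pointer and returns the old tail. */
	memcpy(skb_put(skb, len), hw_buf, len);

	skb->protocol = eth_type_trans(skb, dev);
	return netif_rx(skb);
}
#endif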