Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
net/core/skbuff.c at v2.6.21 (2041 lines, 50 kB)
/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <iiitac@pyr.swan.ac.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Version:        $Id: skbuff.c,v 1.90 2001/11/07 05:56:19 davem Exp $
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers
 *              Ray VanTassle   :       Fixed --skb->lock in free
 *              Alan Cox        :       skb_copy copy arp field
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool
 *
 *      NOTE:
 *              The __skb_ routines should be called with interrupts
 *      disabled, or you better be *real* sure that the operation is atomic
 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
 *      or via disabling bottom half handlers, etc).
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      The functions in this file will not compile correctly with gcc 2.4.x
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <asm/system.h>

#include "kmap_skb.h"

static struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/*
 *      Keep out-of-line to prevent kernel bloat.
 *      __builtin_return_address is not used because it is not always
 *      reliable.
 */

/**
 *      skb_over_panic  -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_put(). Not user callable.
 */
void skb_over_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%p end:%p dev:%s\n",
               here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

/**
 *      skb_under_panic -       private function
 *      @skb: buffer
 *      @sz: size
 *      @here: address
 *
 *      Out of line support code for skb_push(). Not user callable.
 */

void skb_under_panic(struct sk_buff *skb, int sz, void *here)
{
        printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
                          "data:%p tail:%p end:%p dev:%s\n",
               here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end,
               skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

void skb_truesize_bug(struct sk_buff *skb)
{
        printk(KERN_ERR "SKB BUG: Invalid truesize (%u) "
               "len=%u, sizeof(sk_buff)=%Zd\n",
               skb->truesize, skb->len, sizeof(struct sk_buff));
}
EXPORT_SYMBOL(skb_truesize_bug);

/*      Allocate a new skbuff. We do this ourselves so we can fill in a few
 *      'private' fields and also do memory statistics to find all the
 *      [BEEP] leaks.
 *
 */

/**
 *      __alloc_skb     -       allocate a network buffer
 *      @size: size to allocate
 *      @gfp_mask: allocation mask
 *      @fclone: allocate from fclone cache instead of head cache
 *              and allocate a cloned (child) skb
 *      @node: numa node to allocate memory on
 *
 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 *      tail room of size bytes. The object has a reference count of one.
 *      The return is the buffer. On a failure the return is %NULL.
 *
 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 *      %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int fclone, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;

        cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;

        /* Get the DATA. Size must match skb_add_mtu(). */
        size = SKB_DATA_ALIGN(size);
        data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
                        gfp_mask, node);
        if (!data)
                goto nodata;

        memset(skb, 0, offsetof(struct sk_buff, truesize));
        skb->truesize = size + sizeof(struct sk_buff);
        atomic_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb->tail = data;
        skb->end = data + size;
        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        atomic_set(&shinfo->dataref, 1);
        shinfo->nr_frags = 0;
        shinfo->gso_size = 0;
        shinfo->gso_segs = 0;
        shinfo->gso_type = 0;
        shinfo->ip6_frag_id = 0;
        shinfo->frag_list = NULL;

        if (fclone) {
                struct sk_buff *child = skb + 1;
                atomic_t *fclone_ref = (atomic_t *) (child + 1);

                skb->fclone = SKB_FCLONE_ORIG;
                atomic_set(fclone_ref, 1);

                child->fclone = SKB_FCLONE_UNAVAILABLE;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}

/**
 *      __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *      @dev: network device to receive on
 *      @length: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has unspecified headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
                unsigned int length, gfp_t gfp_mask)
{
        int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
        struct sk_buff *skb;

        skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
        if (likely(skb)) {
                skb_reserve(skb, NET_SKB_PAD);
                skb->dev = dev;
        }
        return skb;
}

static void skb_drop_list(struct sk_buff **listp)
{
        struct sk_buff *list = *listp;

        *listp = NULL;

        do {
                struct sk_buff *this = list;
                list = list->next;
                kfree_skb(this);
        } while (list);
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
                skb_get(list);
}

static void skb_release_data(struct sk_buff *skb)
{
        if (!skb->cloned ||
            !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                               &skb_shinfo(skb)->dataref)) {
                if (skb_shinfo(skb)->nr_frags) {
                        int i;
                        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
                                put_page(skb_shinfo(skb)->frags[i].page);
                }

                if (skb_shinfo(skb)->frag_list)
                        skb_drop_fraglist(skb);

                kfree(skb->head);
        }
}

/*
 *      Free an skbuff by memory without cleaning the state.
 */
void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff *other;
        atomic_t *fclone_ref;

        skb_release_data(skb);
        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                break;

        case SKB_FCLONE_ORIG:
                fclone_ref = (atomic_t *) (skb + 2);
                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, skb);
                break;

        case SKB_FCLONE_CLONE:
                fclone_ref = (atomic_t *) (skb + 1);
                other = skb - 1;

                /* The clone portion is available for
                 * fast-cloning again.
                 */
                skb->fclone = SKB_FCLONE_UNAVAILABLE;

                if (atomic_dec_and_test(fclone_ref))
                        kmem_cache_free(skbuff_fclone_cache, other);
                break;
        };
}

/**
 *      __kfree_skb - private function
 *      @skb: buffer
 *
 *      Free an sk_buff. Release anything attached to the buffer.
 *      Clean the state. This is an internal helper function. Users should
 *      always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
        dst_release(skb->dst);
#ifdef CONFIG_XFRM
        secpath_put(skb->sp);
#endif
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#ifdef CONFIG_NETFILTER
        nf_conntrack_put(skb->nfct);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put_reasm(skb->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
        nf_bridge_put(skb->nf_bridge);
#endif
#endif
/* XXX: IS this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_verd = 0;
#endif
#endif

        kfree_skbmem(skb);
}

/**
 *      kfree_skb - free an sk_buff
 *      @skb: buffer to free
 *
 *      Drop a reference to the buffer and free it if the usage count has
 *      hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
        if (unlikely(!skb))
                return;
        if (likely(atomic_read(&skb->users) == 1))
                smp_rmb();
        else if (likely(!atomic_dec_and_test(&skb->users)))
                return;
        __kfree_skb(skb);
}

/**
 *      skb_clone       -       duplicate an sk_buff
 *      @skb: buffer to clone
 *      @gfp_mask: allocation priority
 *
 *      Duplicate an &sk_buff.
The new one is not owned by a socket. Both 368 * copies share the same packet data but not structure. The new 369 * buffer has a reference count of 1. If the allocation fails the 370 * function returns %NULL otherwise the new buffer is returned. 371 * 372 * If this function is called from an interrupt gfp_mask() must be 373 * %GFP_ATOMIC. 374 */ 375 376struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) 377{ 378 struct sk_buff *n; 379 380 n = skb + 1; 381 if (skb->fclone == SKB_FCLONE_ORIG && 382 n->fclone == SKB_FCLONE_UNAVAILABLE) { 383 atomic_t *fclone_ref = (atomic_t *) (n + 1); 384 n->fclone = SKB_FCLONE_CLONE; 385 atomic_inc(fclone_ref); 386 } else { 387 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); 388 if (!n) 389 return NULL; 390 n->fclone = SKB_FCLONE_UNAVAILABLE; 391 } 392 393#define C(x) n->x = skb->x 394 395 n->next = n->prev = NULL; 396 n->sk = NULL; 397 C(tstamp); 398 C(dev); 399 C(h); 400 C(nh); 401 C(mac); 402 C(dst); 403 dst_clone(skb->dst); 404 C(sp); 405#ifdef CONFIG_INET 406 secpath_get(skb->sp); 407#endif 408 memcpy(n->cb, skb->cb, sizeof(skb->cb)); 409 C(len); 410 C(data_len); 411 C(mac_len); 412 C(csum); 413 C(local_df); 414 n->cloned = 1; 415 n->nohdr = 0; 416 C(pkt_type); 417 C(ip_summed); 418 C(priority); 419#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 420 C(ipvs_property); 421#endif 422 C(protocol); 423 n->destructor = NULL; 424 C(mark); 425#ifdef CONFIG_NETFILTER 426 C(nfct); 427 nf_conntrack_get(skb->nfct); 428 C(nfctinfo); 429#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 430 C(nfct_reasm); 431 nf_conntrack_get_reasm(skb->nfct_reasm); 432#endif 433#ifdef CONFIG_BRIDGE_NETFILTER 434 C(nf_bridge); 435 nf_bridge_get(skb->nf_bridge); 436#endif 437#endif /*CONFIG_NETFILTER*/ 438#ifdef CONFIG_NET_SCHED 439 C(tc_index); 440#ifdef CONFIG_NET_CLS_ACT 441 n->tc_verd = SET_TC_VERD(skb->tc_verd,0); 442 n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd); 443 n->tc_verd = CLR_TC_MUNGED(n->tc_verd); 444 C(iif); 445#endif 446 skb_copy_secmark(n, skb); 447#endif 448 C(truesize); 449 atomic_set(&n->users, 1); 450 C(head); 451 C(data); 452 C(tail); 453 C(end); 454 455 atomic_inc(&(skb_shinfo(skb)->dataref)); 456 skb->cloned = 1; 457 458 return n; 459} 460 461static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 462{ 463 /* 464 * Shift between the two data areas in bytes 465 */ 466 unsigned long offset = new->data - old->data; 467 468 new->sk = NULL; 469 new->dev = old->dev; 470 new->priority = old->priority; 471 new->protocol = old->protocol; 472 new->dst = dst_clone(old->dst); 473#ifdef CONFIG_INET 474 new->sp = secpath_get(old->sp); 475#endif 476 new->h.raw = old->h.raw + offset; 477 new->nh.raw = old->nh.raw + offset; 478 new->mac.raw = old->mac.raw + offset; 479 memcpy(new->cb, old->cb, sizeof(old->cb)); 480 new->local_df = old->local_df; 481 new->fclone = SKB_FCLONE_UNAVAILABLE; 482 new->pkt_type = old->pkt_type; 483 new->tstamp = old->tstamp; 484 new->destructor = NULL; 485 new->mark = old->mark; 486#ifdef CONFIG_NETFILTER 487 new->nfct = old->nfct; 488 nf_conntrack_get(old->nfct); 489 new->nfctinfo = old->nfctinfo; 490#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 491 new->nfct_reasm = old->nfct_reasm; 492 nf_conntrack_get_reasm(old->nfct_reasm); 493#endif 494#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 495 new->ipvs_property = old->ipvs_property; 496#endif 497#ifdef CONFIG_BRIDGE_NETFILTER 498 new->nf_bridge = old->nf_bridge; 499 nf_bridge_get(old->nf_bridge); 
500#endif 501#endif 502#ifdef CONFIG_NET_SCHED 503#ifdef CONFIG_NET_CLS_ACT 504 new->tc_verd = old->tc_verd; 505#endif 506 new->tc_index = old->tc_index; 507#endif 508 skb_copy_secmark(new, old); 509 atomic_set(&new->users, 1); 510 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 511 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 512 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type; 513} 514 515/** 516 * skb_copy - create private copy of an sk_buff 517 * @skb: buffer to copy 518 * @gfp_mask: allocation priority 519 * 520 * Make a copy of both an &sk_buff and its data. This is used when the 521 * caller wishes to modify the data and needs a private copy of the 522 * data to alter. Returns %NULL on failure or the pointer to the buffer 523 * on success. The returned buffer has a reference count of 1. 524 * 525 * As by-product this function converts non-linear &sk_buff to linear 526 * one, so that &sk_buff becomes completely private and caller is allowed 527 * to modify all the data of returned buffer. This means that this 528 * function is not recommended for use in circumstances when only 529 * header is going to be modified. Use pskb_copy() instead. 530 */ 531 532struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 533{ 534 int headerlen = skb->data - skb->head; 535 /* 536 * Allocate the copy buffer 537 */ 538 struct sk_buff *n = alloc_skb(skb->end - skb->head + skb->data_len, 539 gfp_mask); 540 if (!n) 541 return NULL; 542 543 /* Set the data pointer */ 544 skb_reserve(n, headerlen); 545 /* Set the tail pointer and length */ 546 skb_put(n, skb->len); 547 n->csum = skb->csum; 548 n->ip_summed = skb->ip_summed; 549 550 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) 551 BUG(); 552 553 copy_skb_header(n, skb); 554 return n; 555} 556 557 558/** 559 * pskb_copy - create copy of an sk_buff with private head. 560 * @skb: buffer to copy 561 * @gfp_mask: allocation priority 562 * 563 * Make a copy of both an &sk_buff and part of its data, located 564 * in header. Fragmented data remain shared. This is used when 565 * the caller wishes to modify only header of &sk_buff and needs 566 * private copy of the header to alter. Returns %NULL on failure 567 * or the pointer to the buffer on success. 568 * The returned buffer has a reference count of 1. 
569 */ 570 571struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) 572{ 573 /* 574 * Allocate the copy buffer 575 */ 576 struct sk_buff *n = alloc_skb(skb->end - skb->head, gfp_mask); 577 578 if (!n) 579 goto out; 580 581 /* Set the data pointer */ 582 skb_reserve(n, skb->data - skb->head); 583 /* Set the tail pointer and length */ 584 skb_put(n, skb_headlen(skb)); 585 /* Copy the bytes */ 586 memcpy(n->data, skb->data, n->len); 587 n->csum = skb->csum; 588 n->ip_summed = skb->ip_summed; 589 590 n->truesize += skb->data_len; 591 n->data_len = skb->data_len; 592 n->len = skb->len; 593 594 if (skb_shinfo(skb)->nr_frags) { 595 int i; 596 597 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 598 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; 599 get_page(skb_shinfo(n)->frags[i].page); 600 } 601 skb_shinfo(n)->nr_frags = i; 602 } 603 604 if (skb_shinfo(skb)->frag_list) { 605 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; 606 skb_clone_fraglist(n); 607 } 608 609 copy_skb_header(n, skb); 610out: 611 return n; 612} 613 614/** 615 * pskb_expand_head - reallocate header of &sk_buff 616 * @skb: buffer to reallocate 617 * @nhead: room to add at head 618 * @ntail: room to add at tail 619 * @gfp_mask: allocation priority 620 * 621 * Expands (or creates identical copy, if &nhead and &ntail are zero) 622 * header of skb. &sk_buff itself is not changed. &sk_buff MUST have 623 * reference count of 1. Returns zero in the case of success or error, 624 * if expansion failed. In the last case, &sk_buff is not changed. 625 * 626 * All the pointers pointing into skb header may change and must be 627 * reloaded after call to this function. 628 */ 629 630int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, 631 gfp_t gfp_mask) 632{ 633 int i; 634 u8 *data; 635 int size = nhead + (skb->end - skb->head) + ntail; 636 long off; 637 638 if (skb_shared(skb)) 639 BUG(); 640 641 size = SKB_DATA_ALIGN(size); 642 643 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 644 if (!data) 645 goto nodata; 646 647 /* Copy only real data... and, alas, header. This should be 648 * optimized for the cases when header is void. 
*/ 649 memcpy(data + nhead, skb->head, skb->tail - skb->head); 650 memcpy(data + size, skb->end, sizeof(struct skb_shared_info)); 651 652 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 653 get_page(skb_shinfo(skb)->frags[i].page); 654 655 if (skb_shinfo(skb)->frag_list) 656 skb_clone_fraglist(skb); 657 658 skb_release_data(skb); 659 660 off = (data + nhead) - skb->head; 661 662 skb->head = data; 663 skb->end = data + size; 664 skb->data += off; 665 skb->tail += off; 666 skb->mac.raw += off; 667 skb->h.raw += off; 668 skb->nh.raw += off; 669 skb->cloned = 0; 670 skb->nohdr = 0; 671 atomic_set(&skb_shinfo(skb)->dataref, 1); 672 return 0; 673 674nodata: 675 return -ENOMEM; 676} 677 678/* Make private copy of skb with writable head and some headroom */ 679 680struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) 681{ 682 struct sk_buff *skb2; 683 int delta = headroom - skb_headroom(skb); 684 685 if (delta <= 0) 686 skb2 = pskb_copy(skb, GFP_ATOMIC); 687 else { 688 skb2 = skb_clone(skb, GFP_ATOMIC); 689 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0, 690 GFP_ATOMIC)) { 691 kfree_skb(skb2); 692 skb2 = NULL; 693 } 694 } 695 return skb2; 696} 697 698 699/** 700 * skb_copy_expand - copy and expand sk_buff 701 * @skb: buffer to copy 702 * @newheadroom: new free bytes at head 703 * @newtailroom: new free bytes at tail 704 * @gfp_mask: allocation priority 705 * 706 * Make a copy of both an &sk_buff and its data and while doing so 707 * allocate additional space. 708 * 709 * This is used when the caller wishes to modify the data and needs a 710 * private copy of the data to alter as well as more space for new fields. 711 * Returns %NULL on failure or the pointer to the buffer 712 * on success. The returned buffer has a reference count of 1. 713 * 714 * You must pass %GFP_ATOMIC as the allocation priority if this function 715 * is called from an interrupt. 716 * 717 * BUG ALERT: ip_summed is not copied. Why does this work? Is it used 718 * only by netfilter in the cases when checksum is recalculated? --ANK 719 */ 720struct sk_buff *skb_copy_expand(const struct sk_buff *skb, 721 int newheadroom, int newtailroom, 722 gfp_t gfp_mask) 723{ 724 /* 725 * Allocate the copy buffer 726 */ 727 struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom, 728 gfp_mask); 729 int head_copy_len, head_copy_off; 730 731 if (!n) 732 return NULL; 733 734 skb_reserve(n, newheadroom); 735 736 /* Set the tail pointer and length */ 737 skb_put(n, skb->len); 738 739 head_copy_len = skb_headroom(skb); 740 head_copy_off = 0; 741 if (newheadroom <= head_copy_len) 742 head_copy_len = newheadroom; 743 else 744 head_copy_off = newheadroom - head_copy_len; 745 746 /* Copy the linear header and data. */ 747 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, 748 skb->len + head_copy_len)) 749 BUG(); 750 751 copy_skb_header(n, skb); 752 753 return n; 754} 755 756/** 757 * skb_pad - zero pad the tail of an skb 758 * @skb: buffer to pad 759 * @pad: space to pad 760 * 761 * Ensure that a buffer is followed by a padding area that is zero 762 * filled. Used by network drivers which may DMA or transfer data 763 * beyond the buffer end onto the wire. 764 * 765 * May return error in out of memory cases. The skb is freed on error. 766 */ 767 768int skb_pad(struct sk_buff *skb, int pad) 769{ 770 int err; 771 int ntail; 772 773 /* If the skbuff is non linear tailroom is always zero.. 
*/ 774 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { 775 memset(skb->data+skb->len, 0, pad); 776 return 0; 777 } 778 779 ntail = skb->data_len + pad - (skb->end - skb->tail); 780 if (likely(skb_cloned(skb) || ntail > 0)) { 781 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 782 if (unlikely(err)) 783 goto free_skb; 784 } 785 786 /* FIXME: The use of this function with non-linear skb's really needs 787 * to be audited. 788 */ 789 err = skb_linearize(skb); 790 if (unlikely(err)) 791 goto free_skb; 792 793 memset(skb->data + skb->len, 0, pad); 794 return 0; 795 796free_skb: 797 kfree_skb(skb); 798 return err; 799} 800 801/* Trims skb to length len. It can change skb pointers. 802 */ 803 804int ___pskb_trim(struct sk_buff *skb, unsigned int len) 805{ 806 struct sk_buff **fragp; 807 struct sk_buff *frag; 808 int offset = skb_headlen(skb); 809 int nfrags = skb_shinfo(skb)->nr_frags; 810 int i; 811 int err; 812 813 if (skb_cloned(skb) && 814 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) 815 return err; 816 817 i = 0; 818 if (offset >= len) 819 goto drop_pages; 820 821 for (; i < nfrags; i++) { 822 int end = offset + skb_shinfo(skb)->frags[i].size; 823 824 if (end < len) { 825 offset = end; 826 continue; 827 } 828 829 skb_shinfo(skb)->frags[i++].size = len - offset; 830 831drop_pages: 832 skb_shinfo(skb)->nr_frags = i; 833 834 for (; i < nfrags; i++) 835 put_page(skb_shinfo(skb)->frags[i].page); 836 837 if (skb_shinfo(skb)->frag_list) 838 skb_drop_fraglist(skb); 839 goto done; 840 } 841 842 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); 843 fragp = &frag->next) { 844 int end = offset + frag->len; 845 846 if (skb_shared(frag)) { 847 struct sk_buff *nfrag; 848 849 nfrag = skb_clone(frag, GFP_ATOMIC); 850 if (unlikely(!nfrag)) 851 return -ENOMEM; 852 853 nfrag->next = frag->next; 854 kfree_skb(frag); 855 frag = nfrag; 856 *fragp = frag; 857 } 858 859 if (end < len) { 860 offset = end; 861 continue; 862 } 863 864 if (end > len && 865 unlikely((err = pskb_trim(frag, len - offset)))) 866 return err; 867 868 if (frag->next) 869 skb_drop_list(&frag->next); 870 break; 871 } 872 873done: 874 if (len > skb_headlen(skb)) { 875 skb->data_len -= skb->len - len; 876 skb->len = len; 877 } else { 878 skb->len = len; 879 skb->data_len = 0; 880 skb->tail = skb->data + len; 881 } 882 883 return 0; 884} 885 886/** 887 * __pskb_pull_tail - advance tail of skb header 888 * @skb: buffer to reallocate 889 * @delta: number of bytes to advance tail 890 * 891 * The function makes a sense only on a fragmented &sk_buff, 892 * it expands header moving its tail forward and copying necessary 893 * data from fragmented part. 894 * 895 * &sk_buff MUST have reference count of 1. 896 * 897 * Returns %NULL (and &sk_buff does not change) if pull failed 898 * or value of new tail of skb in the case of success. 899 * 900 * All the pointers pointing into skb header may change and must be 901 * reloaded after call to this function. 902 */ 903 904/* Moves tail of skb head forward, copying data from fragmented part, 905 * when it is necessary. 906 * 1. It may fail due to malloc failure. 907 * 2. It may change skb pointers. 908 * 909 * It is pretty complicated. Luckily, it is called only in exceptional cases. 910 */ 911unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) 912{ 913 /* If skb has not enough free space at tail, get new one 914 * plus 128 bytes for future expansions. If we have enough 915 * room at tail, reallocate without expansion only if skb is cloned. 
916 */ 917 int i, k, eat = (skb->tail + delta) - skb->end; 918 919 if (eat > 0 || skb_cloned(skb)) { 920 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, 921 GFP_ATOMIC)) 922 return NULL; 923 } 924 925 if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta)) 926 BUG(); 927 928 /* Optimization: no fragments, no reasons to preestimate 929 * size of pulled pages. Superb. 930 */ 931 if (!skb_shinfo(skb)->frag_list) 932 goto pull_pages; 933 934 /* Estimate size of pulled pages. */ 935 eat = delta; 936 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 937 if (skb_shinfo(skb)->frags[i].size >= eat) 938 goto pull_pages; 939 eat -= skb_shinfo(skb)->frags[i].size; 940 } 941 942 /* If we need update frag list, we are in troubles. 943 * Certainly, it possible to add an offset to skb data, 944 * but taking into account that pulling is expected to 945 * be very rare operation, it is worth to fight against 946 * further bloating skb head and crucify ourselves here instead. 947 * Pure masohism, indeed. 8)8) 948 */ 949 if (eat) { 950 struct sk_buff *list = skb_shinfo(skb)->frag_list; 951 struct sk_buff *clone = NULL; 952 struct sk_buff *insp = NULL; 953 954 do { 955 BUG_ON(!list); 956 957 if (list->len <= eat) { 958 /* Eaten as whole. */ 959 eat -= list->len; 960 list = list->next; 961 insp = list; 962 } else { 963 /* Eaten partially. */ 964 965 if (skb_shared(list)) { 966 /* Sucks! We need to fork list. :-( */ 967 clone = skb_clone(list, GFP_ATOMIC); 968 if (!clone) 969 return NULL; 970 insp = list->next; 971 list = clone; 972 } else { 973 /* This may be pulled without 974 * problems. */ 975 insp = list; 976 } 977 if (!pskb_pull(list, eat)) { 978 if (clone) 979 kfree_skb(clone); 980 return NULL; 981 } 982 break; 983 } 984 } while (eat); 985 986 /* Free pulled out fragments. */ 987 while ((list = skb_shinfo(skb)->frag_list) != insp) { 988 skb_shinfo(skb)->frag_list = list->next; 989 kfree_skb(list); 990 } 991 /* And insert new clone at head. */ 992 if (clone) { 993 clone->next = list; 994 skb_shinfo(skb)->frag_list = clone; 995 } 996 } 997 /* Success! Now we may commit changes to skb data. */ 998 999pull_pages: 1000 eat = delta; 1001 k = 0; 1002 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1003 if (skb_shinfo(skb)->frags[i].size <= eat) { 1004 put_page(skb_shinfo(skb)->frags[i].page); 1005 eat -= skb_shinfo(skb)->frags[i].size; 1006 } else { 1007 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1008 if (eat) { 1009 skb_shinfo(skb)->frags[k].page_offset += eat; 1010 skb_shinfo(skb)->frags[k].size -= eat; 1011 eat = 0; 1012 } 1013 k++; 1014 } 1015 } 1016 skb_shinfo(skb)->nr_frags = k; 1017 1018 skb->tail += delta; 1019 skb->data_len -= delta; 1020 1021 return skb->tail; 1022} 1023 1024/* Copy some data bits from skb to kernel buffer. */ 1025 1026int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) 1027{ 1028 int i, copy; 1029 int start = skb_headlen(skb); 1030 1031 if (offset > (int)skb->len - len) 1032 goto fault; 1033 1034 /* Copy header. 
*/ 1035 if ((copy = start - offset) > 0) { 1036 if (copy > len) 1037 copy = len; 1038 memcpy(to, skb->data + offset, copy); 1039 if ((len -= copy) == 0) 1040 return 0; 1041 offset += copy; 1042 to += copy; 1043 } 1044 1045 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1046 int end; 1047 1048 BUG_TRAP(start <= offset + len); 1049 1050 end = start + skb_shinfo(skb)->frags[i].size; 1051 if ((copy = end - offset) > 0) { 1052 u8 *vaddr; 1053 1054 if (copy > len) 1055 copy = len; 1056 1057 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 1058 memcpy(to, 1059 vaddr + skb_shinfo(skb)->frags[i].page_offset+ 1060 offset - start, copy); 1061 kunmap_skb_frag(vaddr); 1062 1063 if ((len -= copy) == 0) 1064 return 0; 1065 offset += copy; 1066 to += copy; 1067 } 1068 start = end; 1069 } 1070 1071 if (skb_shinfo(skb)->frag_list) { 1072 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1073 1074 for (; list; list = list->next) { 1075 int end; 1076 1077 BUG_TRAP(start <= offset + len); 1078 1079 end = start + list->len; 1080 if ((copy = end - offset) > 0) { 1081 if (copy > len) 1082 copy = len; 1083 if (skb_copy_bits(list, offset - start, 1084 to, copy)) 1085 goto fault; 1086 if ((len -= copy) == 0) 1087 return 0; 1088 offset += copy; 1089 to += copy; 1090 } 1091 start = end; 1092 } 1093 } 1094 if (!len) 1095 return 0; 1096 1097fault: 1098 return -EFAULT; 1099} 1100 1101/** 1102 * skb_store_bits - store bits from kernel buffer to skb 1103 * @skb: destination buffer 1104 * @offset: offset in destination 1105 * @from: source buffer 1106 * @len: number of bytes to copy 1107 * 1108 * Copy the specified number of bytes from the source buffer to the 1109 * destination skb. This function handles all the messy bits of 1110 * traversing fragment lists and such. 1111 */ 1112 1113int skb_store_bits(const struct sk_buff *skb, int offset, void *from, int len) 1114{ 1115 int i, copy; 1116 int start = skb_headlen(skb); 1117 1118 if (offset > (int)skb->len - len) 1119 goto fault; 1120 1121 if ((copy = start - offset) > 0) { 1122 if (copy > len) 1123 copy = len; 1124 memcpy(skb->data + offset, from, copy); 1125 if ((len -= copy) == 0) 1126 return 0; 1127 offset += copy; 1128 from += copy; 1129 } 1130 1131 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1132 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1133 int end; 1134 1135 BUG_TRAP(start <= offset + len); 1136 1137 end = start + frag->size; 1138 if ((copy = end - offset) > 0) { 1139 u8 *vaddr; 1140 1141 if (copy > len) 1142 copy = len; 1143 1144 vaddr = kmap_skb_frag(frag); 1145 memcpy(vaddr + frag->page_offset + offset - start, 1146 from, copy); 1147 kunmap_skb_frag(vaddr); 1148 1149 if ((len -= copy) == 0) 1150 return 0; 1151 offset += copy; 1152 from += copy; 1153 } 1154 start = end; 1155 } 1156 1157 if (skb_shinfo(skb)->frag_list) { 1158 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1159 1160 for (; list; list = list->next) { 1161 int end; 1162 1163 BUG_TRAP(start <= offset + len); 1164 1165 end = start + list->len; 1166 if ((copy = end - offset) > 0) { 1167 if (copy > len) 1168 copy = len; 1169 if (skb_store_bits(list, offset - start, 1170 from, copy)) 1171 goto fault; 1172 if ((len -= copy) == 0) 1173 return 0; 1174 offset += copy; 1175 from += copy; 1176 } 1177 start = end; 1178 } 1179 } 1180 if (!len) 1181 return 0; 1182 1183fault: 1184 return -EFAULT; 1185} 1186 1187EXPORT_SYMBOL(skb_store_bits); 1188 1189/* Checksum skb data. 
*/ 1190 1191__wsum skb_checksum(const struct sk_buff *skb, int offset, 1192 int len, __wsum csum) 1193{ 1194 int start = skb_headlen(skb); 1195 int i, copy = start - offset; 1196 int pos = 0; 1197 1198 /* Checksum header. */ 1199 if (copy > 0) { 1200 if (copy > len) 1201 copy = len; 1202 csum = csum_partial(skb->data + offset, copy, csum); 1203 if ((len -= copy) == 0) 1204 return csum; 1205 offset += copy; 1206 pos = copy; 1207 } 1208 1209 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1210 int end; 1211 1212 BUG_TRAP(start <= offset + len); 1213 1214 end = start + skb_shinfo(skb)->frags[i].size; 1215 if ((copy = end - offset) > 0) { 1216 __wsum csum2; 1217 u8 *vaddr; 1218 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1219 1220 if (copy > len) 1221 copy = len; 1222 vaddr = kmap_skb_frag(frag); 1223 csum2 = csum_partial(vaddr + frag->page_offset + 1224 offset - start, copy, 0); 1225 kunmap_skb_frag(vaddr); 1226 csum = csum_block_add(csum, csum2, pos); 1227 if (!(len -= copy)) 1228 return csum; 1229 offset += copy; 1230 pos += copy; 1231 } 1232 start = end; 1233 } 1234 1235 if (skb_shinfo(skb)->frag_list) { 1236 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1237 1238 for (; list; list = list->next) { 1239 int end; 1240 1241 BUG_TRAP(start <= offset + len); 1242 1243 end = start + list->len; 1244 if ((copy = end - offset) > 0) { 1245 __wsum csum2; 1246 if (copy > len) 1247 copy = len; 1248 csum2 = skb_checksum(list, offset - start, 1249 copy, 0); 1250 csum = csum_block_add(csum, csum2, pos); 1251 if ((len -= copy) == 0) 1252 return csum; 1253 offset += copy; 1254 pos += copy; 1255 } 1256 start = end; 1257 } 1258 } 1259 BUG_ON(len); 1260 1261 return csum; 1262} 1263 1264/* Both of above in one bottle. */ 1265 1266__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, 1267 u8 *to, int len, __wsum csum) 1268{ 1269 int start = skb_headlen(skb); 1270 int i, copy = start - offset; 1271 int pos = 0; 1272 1273 /* Copy header. 
*/ 1274 if (copy > 0) { 1275 if (copy > len) 1276 copy = len; 1277 csum = csum_partial_copy_nocheck(skb->data + offset, to, 1278 copy, csum); 1279 if ((len -= copy) == 0) 1280 return csum; 1281 offset += copy; 1282 to += copy; 1283 pos = copy; 1284 } 1285 1286 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1287 int end; 1288 1289 BUG_TRAP(start <= offset + len); 1290 1291 end = start + skb_shinfo(skb)->frags[i].size; 1292 if ((copy = end - offset) > 0) { 1293 __wsum csum2; 1294 u8 *vaddr; 1295 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1296 1297 if (copy > len) 1298 copy = len; 1299 vaddr = kmap_skb_frag(frag); 1300 csum2 = csum_partial_copy_nocheck(vaddr + 1301 frag->page_offset + 1302 offset - start, to, 1303 copy, 0); 1304 kunmap_skb_frag(vaddr); 1305 csum = csum_block_add(csum, csum2, pos); 1306 if (!(len -= copy)) 1307 return csum; 1308 offset += copy; 1309 to += copy; 1310 pos += copy; 1311 } 1312 start = end; 1313 } 1314 1315 if (skb_shinfo(skb)->frag_list) { 1316 struct sk_buff *list = skb_shinfo(skb)->frag_list; 1317 1318 for (; list; list = list->next) { 1319 __wsum csum2; 1320 int end; 1321 1322 BUG_TRAP(start <= offset + len); 1323 1324 end = start + list->len; 1325 if ((copy = end - offset) > 0) { 1326 if (copy > len) 1327 copy = len; 1328 csum2 = skb_copy_and_csum_bits(list, 1329 offset - start, 1330 to, copy, 0); 1331 csum = csum_block_add(csum, csum2, pos); 1332 if ((len -= copy) == 0) 1333 return csum; 1334 offset += copy; 1335 to += copy; 1336 pos += copy; 1337 } 1338 start = end; 1339 } 1340 } 1341 BUG_ON(len); 1342 return csum; 1343} 1344 1345void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) 1346{ 1347 __wsum csum; 1348 long csstart; 1349 1350 if (skb->ip_summed == CHECKSUM_PARTIAL) 1351 csstart = skb->h.raw - skb->data; 1352 else 1353 csstart = skb_headlen(skb); 1354 1355 BUG_ON(csstart > skb_headlen(skb)); 1356 1357 memcpy(to, skb->data, csstart); 1358 1359 csum = 0; 1360 if (csstart != skb->len) 1361 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, 1362 skb->len - csstart, 0); 1363 1364 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1365 long csstuff = csstart + skb->csum_offset; 1366 1367 *((__sum16 *)(to + csstuff)) = csum_fold(csum); 1368 } 1369} 1370 1371/** 1372 * skb_dequeue - remove from the head of the queue 1373 * @list: list to dequeue from 1374 * 1375 * Remove the head of the list. The list lock is taken so the function 1376 * may be used safely with other locking list functions. The head item is 1377 * returned or %NULL if the list is empty. 1378 */ 1379 1380struct sk_buff *skb_dequeue(struct sk_buff_head *list) 1381{ 1382 unsigned long flags; 1383 struct sk_buff *result; 1384 1385 spin_lock_irqsave(&list->lock, flags); 1386 result = __skb_dequeue(list); 1387 spin_unlock_irqrestore(&list->lock, flags); 1388 return result; 1389} 1390 1391/** 1392 * skb_dequeue_tail - remove from the tail of the queue 1393 * @list: list to dequeue from 1394 * 1395 * Remove the tail of the list. The list lock is taken so the function 1396 * may be used safely with other locking list functions. The tail item is 1397 * returned or %NULL if the list is empty. 
1398 */ 1399struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list) 1400{ 1401 unsigned long flags; 1402 struct sk_buff *result; 1403 1404 spin_lock_irqsave(&list->lock, flags); 1405 result = __skb_dequeue_tail(list); 1406 spin_unlock_irqrestore(&list->lock, flags); 1407 return result; 1408} 1409 1410/** 1411 * skb_queue_purge - empty a list 1412 * @list: list to empty 1413 * 1414 * Delete all buffers on an &sk_buff list. Each buffer is removed from 1415 * the list and one reference dropped. This function takes the list 1416 * lock and is atomic with respect to other list locking functions. 1417 */ 1418void skb_queue_purge(struct sk_buff_head *list) 1419{ 1420 struct sk_buff *skb; 1421 while ((skb = skb_dequeue(list)) != NULL) 1422 kfree_skb(skb); 1423} 1424 1425/** 1426 * skb_queue_head - queue a buffer at the list head 1427 * @list: list to use 1428 * @newsk: buffer to queue 1429 * 1430 * Queue a buffer at the start of the list. This function takes the 1431 * list lock and can be used safely with other locking &sk_buff functions 1432 * safely. 1433 * 1434 * A buffer cannot be placed on two lists at the same time. 1435 */ 1436void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk) 1437{ 1438 unsigned long flags; 1439 1440 spin_lock_irqsave(&list->lock, flags); 1441 __skb_queue_head(list, newsk); 1442 spin_unlock_irqrestore(&list->lock, flags); 1443} 1444 1445/** 1446 * skb_queue_tail - queue a buffer at the list tail 1447 * @list: list to use 1448 * @newsk: buffer to queue 1449 * 1450 * Queue a buffer at the tail of the list. This function takes the 1451 * list lock and can be used safely with other locking &sk_buff functions 1452 * safely. 1453 * 1454 * A buffer cannot be placed on two lists at the same time. 1455 */ 1456void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk) 1457{ 1458 unsigned long flags; 1459 1460 spin_lock_irqsave(&list->lock, flags); 1461 __skb_queue_tail(list, newsk); 1462 spin_unlock_irqrestore(&list->lock, flags); 1463} 1464 1465/** 1466 * skb_unlink - remove a buffer from a list 1467 * @skb: buffer to remove 1468 * @list: list to use 1469 * 1470 * Remove a packet from a list. The list locks are taken and this 1471 * function is atomic with respect to other list locked calls 1472 * 1473 * You must know what list the SKB is on. 1474 */ 1475void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) 1476{ 1477 unsigned long flags; 1478 1479 spin_lock_irqsave(&list->lock, flags); 1480 __skb_unlink(skb, list); 1481 spin_unlock_irqrestore(&list->lock, flags); 1482} 1483 1484/** 1485 * skb_append - append a buffer 1486 * @old: buffer to insert after 1487 * @newsk: buffer to insert 1488 * @list: list to use 1489 * 1490 * Place a packet after a given packet in a list. The list locks are taken 1491 * and this function is atomic with respect to other list locked calls. 1492 * A buffer cannot be placed on two lists at the same time. 1493 */ 1494void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 1495{ 1496 unsigned long flags; 1497 1498 spin_lock_irqsave(&list->lock, flags); 1499 __skb_append(old, newsk, list); 1500 spin_unlock_irqrestore(&list->lock, flags); 1501} 1502 1503 1504/** 1505 * skb_insert - insert a buffer 1506 * @old: buffer to insert before 1507 * @newsk: buffer to insert 1508 * @list: list to use 1509 * 1510 * Place a packet before a given packet in a list. The list locks are 1511 * taken and this function is atomic with respect to other list locked 1512 * calls. 
1513 * 1514 * A buffer cannot be placed on two lists at the same time. 1515 */ 1516void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list) 1517{ 1518 unsigned long flags; 1519 1520 spin_lock_irqsave(&list->lock, flags); 1521 __skb_insert(newsk, old->prev, old, list); 1522 spin_unlock_irqrestore(&list->lock, flags); 1523} 1524 1525#if 0 1526/* 1527 * Tune the memory allocator for a new MTU size. 1528 */ 1529void skb_add_mtu(int mtu) 1530{ 1531 /* Must match allocation in alloc_skb */ 1532 mtu = SKB_DATA_ALIGN(mtu) + sizeof(struct skb_shared_info); 1533 1534 kmem_add_cache_size(mtu); 1535} 1536#endif 1537 1538static inline void skb_split_inside_header(struct sk_buff *skb, 1539 struct sk_buff* skb1, 1540 const u32 len, const int pos) 1541{ 1542 int i; 1543 1544 memcpy(skb_put(skb1, pos - len), skb->data + len, pos - len); 1545 1546 /* And move data appendix as is. */ 1547 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1548 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; 1549 1550 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; 1551 skb_shinfo(skb)->nr_frags = 0; 1552 skb1->data_len = skb->data_len; 1553 skb1->len += skb1->data_len; 1554 skb->data_len = 0; 1555 skb->len = len; 1556 skb->tail = skb->data + len; 1557} 1558 1559static inline void skb_split_no_header(struct sk_buff *skb, 1560 struct sk_buff* skb1, 1561 const u32 len, int pos) 1562{ 1563 int i, k = 0; 1564 const int nfrags = skb_shinfo(skb)->nr_frags; 1565 1566 skb_shinfo(skb)->nr_frags = 0; 1567 skb1->len = skb1->data_len = skb->len - len; 1568 skb->len = len; 1569 skb->data_len = len - pos; 1570 1571 for (i = 0; i < nfrags; i++) { 1572 int size = skb_shinfo(skb)->frags[i].size; 1573 1574 if (pos + size > len) { 1575 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; 1576 1577 if (pos < len) { 1578 /* Split frag. 1579 * We have two variants in this case: 1580 * 1. Move all the frag to the second 1581 * part, if it is possible. F.e. 1582 * this approach is mandatory for TUX, 1583 * where splitting is expensive. 1584 * 2. Split is accurately. We make this. 1585 */ 1586 get_page(skb_shinfo(skb)->frags[i].page); 1587 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 1588 skb_shinfo(skb1)->frags[0].size -= len - pos; 1589 skb_shinfo(skb)->frags[i].size = len - pos; 1590 skb_shinfo(skb)->nr_frags++; 1591 } 1592 k++; 1593 } else 1594 skb_shinfo(skb)->nr_frags++; 1595 pos += size; 1596 } 1597 skb_shinfo(skb1)->nr_frags = k; 1598} 1599 1600/** 1601 * skb_split - Split fragmented skb to two parts at length len. 1602 * @skb: the buffer to split 1603 * @skb1: the buffer to receive the second part 1604 * @len: new length for skb 1605 */ 1606void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) 1607{ 1608 int pos = skb_headlen(skb); 1609 1610 if (len < pos) /* Split line is inside header. */ 1611 skb_split_inside_header(skb, skb1, len, pos); 1612 else /* Second chunk has no header, nothing to copy. */ 1613 skb_split_no_header(skb, skb1, len, pos); 1614} 1615 1616/** 1617 * skb_prepare_seq_read - Prepare a sequential read of skb data 1618 * @skb: the buffer to read 1619 * @from: lower offset of data to be read 1620 * @to: upper offset of data to be read 1621 * @st: state variable 1622 * 1623 * Initializes the specified state variable. Must be called before 1624 * invoking skb_seq_read() for the first time. 
1625 */ 1626void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, 1627 unsigned int to, struct skb_seq_state *st) 1628{ 1629 st->lower_offset = from; 1630 st->upper_offset = to; 1631 st->root_skb = st->cur_skb = skb; 1632 st->frag_idx = st->stepped_offset = 0; 1633 st->frag_data = NULL; 1634} 1635 1636/** 1637 * skb_seq_read - Sequentially read skb data 1638 * @consumed: number of bytes consumed by the caller so far 1639 * @data: destination pointer for data to be returned 1640 * @st: state variable 1641 * 1642 * Reads a block of skb data at &consumed relative to the 1643 * lower offset specified to skb_prepare_seq_read(). Assigns 1644 * the head of the data block to &data and returns the length 1645 * of the block or 0 if the end of the skb data or the upper 1646 * offset has been reached. 1647 * 1648 * The caller is not required to consume all of the data 1649 * returned, i.e. &consumed is typically set to the number 1650 * of bytes already consumed and the next call to 1651 * skb_seq_read() will return the remaining part of the block. 1652 * 1653 * Note: The size of each block of data returned can be arbitary, 1654 * this limitation is the cost for zerocopy seqeuental 1655 * reads of potentially non linear data. 1656 * 1657 * Note: Fragment lists within fragments are not implemented 1658 * at the moment, state->root_skb could be replaced with 1659 * a stack for this purpose. 1660 */ 1661unsigned int skb_seq_read(unsigned int consumed, const u8 **data, 1662 struct skb_seq_state *st) 1663{ 1664 unsigned int block_limit, abs_offset = consumed + st->lower_offset; 1665 skb_frag_t *frag; 1666 1667 if (unlikely(abs_offset >= st->upper_offset)) 1668 return 0; 1669 1670next_skb: 1671 block_limit = skb_headlen(st->cur_skb); 1672 1673 if (abs_offset < block_limit) { 1674 *data = st->cur_skb->data + abs_offset; 1675 return block_limit - abs_offset; 1676 } 1677 1678 if (st->frag_idx == 0 && !st->frag_data) 1679 st->stepped_offset += skb_headlen(st->cur_skb); 1680 1681 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 1682 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 1683 block_limit = frag->size + st->stepped_offset; 1684 1685 if (abs_offset < block_limit) { 1686 if (!st->frag_data) 1687 st->frag_data = kmap_skb_frag(frag); 1688 1689 *data = (u8 *) st->frag_data + frag->page_offset + 1690 (abs_offset - st->stepped_offset); 1691 1692 return block_limit - abs_offset; 1693 } 1694 1695 if (st->frag_data) { 1696 kunmap_skb_frag(st->frag_data); 1697 st->frag_data = NULL; 1698 } 1699 1700 st->frag_idx++; 1701 st->stepped_offset += frag->size; 1702 } 1703 1704 if (st->cur_skb->next) { 1705 st->cur_skb = st->cur_skb->next; 1706 st->frag_idx = 0; 1707 goto next_skb; 1708 } else if (st->root_skb == st->cur_skb && 1709 skb_shinfo(st->root_skb)->frag_list) { 1710 st->cur_skb = skb_shinfo(st->root_skb)->frag_list; 1711 goto next_skb; 1712 } 1713 1714 return 0; 1715} 1716 1717/** 1718 * skb_abort_seq_read - Abort a sequential read of skb data 1719 * @st: state variable 1720 * 1721 * Must be called if skb_seq_read() was not called until it 1722 * returned 0. 
1723 */ 1724void skb_abort_seq_read(struct skb_seq_state *st) 1725{ 1726 if (st->frag_data) 1727 kunmap_skb_frag(st->frag_data); 1728} 1729 1730#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb)) 1731 1732static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, 1733 struct ts_config *conf, 1734 struct ts_state *state) 1735{ 1736 return skb_seq_read(offset, text, TS_SKB_CB(state)); 1737} 1738 1739static void skb_ts_finish(struct ts_config *conf, struct ts_state *state) 1740{ 1741 skb_abort_seq_read(TS_SKB_CB(state)); 1742} 1743 1744/** 1745 * skb_find_text - Find a text pattern in skb data 1746 * @skb: the buffer to look in 1747 * @from: search offset 1748 * @to: search limit 1749 * @config: textsearch configuration 1750 * @state: uninitialized textsearch state variable 1751 * 1752 * Finds a pattern in the skb data according to the specified 1753 * textsearch configuration. Use textsearch_next() to retrieve 1754 * subsequent occurrences of the pattern. Returns the offset 1755 * to the first occurrence or UINT_MAX if no match was found. 1756 */ 1757unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, 1758 unsigned int to, struct ts_config *config, 1759 struct ts_state *state) 1760{ 1761 unsigned int ret; 1762 1763 config->get_next_block = skb_ts_get_next_block; 1764 config->finish = skb_ts_finish; 1765 1766 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); 1767 1768 ret = textsearch_find(config, state); 1769 return (ret <= to - from ? ret : UINT_MAX); 1770} 1771 1772/** 1773 * skb_append_datato_frags: - append the user data to a skb 1774 * @sk: sock structure 1775 * @skb: skb structure to be appened with user data. 1776 * @getfrag: call back function to be used for getting the user data 1777 * @from: pointer to user message iov 1778 * @length: length of the iov message 1779 * 1780 * Description: This procedure append the user data in the fragment part 1781 * of the skb if any page alloc fails user this procedure returns -ENOMEM 1782 */ 1783int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 1784 int (*getfrag)(void *from, char *to, int offset, 1785 int len, int odd, struct sk_buff *skb), 1786 void *from, int length) 1787{ 1788 int frg_cnt = 0; 1789 skb_frag_t *frag = NULL; 1790 struct page *page = NULL; 1791 int copy, left; 1792 int offset = 0; 1793 int ret; 1794 1795 do { 1796 /* Return error if we don't have space for new frag */ 1797 frg_cnt = skb_shinfo(skb)->nr_frags; 1798 if (frg_cnt >= MAX_SKB_FRAGS) 1799 return -EFAULT; 1800 1801 /* allocate a new page for next frag */ 1802 page = alloc_pages(sk->sk_allocation, 0); 1803 1804 /* If alloc_page fails just return failure and caller will 1805 * free previous allocated pages by doing kfree_skb() 1806 */ 1807 if (page == NULL) 1808 return -ENOMEM; 1809 1810 /* initialize the next frag */ 1811 sk->sk_sndmsg_page = page; 1812 sk->sk_sndmsg_off = 0; 1813 skb_fill_page_desc(skb, frg_cnt, page, 0, 0); 1814 skb->truesize += PAGE_SIZE; 1815 atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc); 1816 1817 /* get the new initialized frag */ 1818 frg_cnt = skb_shinfo(skb)->nr_frags; 1819 frag = &skb_shinfo(skb)->frags[frg_cnt - 1]; 1820 1821 /* copy the user data to page */ 1822 left = PAGE_SIZE - frag->page_offset; 1823 copy = (length > left)? 
left : length; 1824 1825 ret = getfrag(from, (page_address(frag->page) + 1826 frag->page_offset + frag->size), 1827 offset, copy, 0, skb); 1828 if (ret < 0) 1829 return -EFAULT; 1830 1831 /* copy was successful so update the size parameters */ 1832 sk->sk_sndmsg_off += copy; 1833 frag->size += copy; 1834 skb->len += copy; 1835 skb->data_len += copy; 1836 offset += copy; 1837 length -= copy; 1838 1839 } while (length > 0); 1840 1841 return 0; 1842} 1843 1844/** 1845 * skb_pull_rcsum - pull skb and update receive checksum 1846 * @skb: buffer to update 1847 * @start: start of data before pull 1848 * @len: length of data pulled 1849 * 1850 * This function performs an skb_pull on the packet and updates 1851 * update the CHECKSUM_COMPLETE checksum. It should be used on 1852 * receive path processing instead of skb_pull unless you know 1853 * that the checksum difference is zero (e.g., a valid IP header) 1854 * or you are setting ip_summed to CHECKSUM_NONE. 1855 */ 1856unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) 1857{ 1858 BUG_ON(len > skb->len); 1859 skb->len -= len; 1860 BUG_ON(skb->len < skb->data_len); 1861 skb_postpull_rcsum(skb, skb->data, len); 1862 return skb->data += len; 1863} 1864 1865EXPORT_SYMBOL_GPL(skb_pull_rcsum); 1866 1867/** 1868 * skb_segment - Perform protocol segmentation on skb. 1869 * @skb: buffer to segment 1870 * @features: features for the output path (see dev->features) 1871 * 1872 * This function performs segmentation on the given skb. It returns 1873 * the segment at the given position. It returns NULL if there are 1874 * no more segments to generate, or when an error is encountered. 1875 */ 1876struct sk_buff *skb_segment(struct sk_buff *skb, int features) 1877{ 1878 struct sk_buff *segs = NULL; 1879 struct sk_buff *tail = NULL; 1880 unsigned int mss = skb_shinfo(skb)->gso_size; 1881 unsigned int doffset = skb->data - skb->mac.raw; 1882 unsigned int offset = doffset; 1883 unsigned int headroom; 1884 unsigned int len; 1885 int sg = features & NETIF_F_SG; 1886 int nfrags = skb_shinfo(skb)->nr_frags; 1887 int err = -ENOMEM; 1888 int i = 0; 1889 int pos; 1890 1891 __skb_push(skb, doffset); 1892 headroom = skb_headroom(skb); 1893 pos = skb_headlen(skb); 1894 1895 do { 1896 struct sk_buff *nskb; 1897 skb_frag_t *frag; 1898 int hsize; 1899 int k; 1900 int size; 1901 1902 len = skb->len - offset; 1903 if (len > mss) 1904 len = mss; 1905 1906 hsize = skb_headlen(skb) - offset; 1907 if (hsize < 0) 1908 hsize = 0; 1909 if (hsize > len || !sg) 1910 hsize = len; 1911 1912 nskb = alloc_skb(hsize + doffset + headroom, GFP_ATOMIC); 1913 if (unlikely(!nskb)) 1914 goto err; 1915 1916 if (segs) 1917 tail->next = nskb; 1918 else 1919 segs = nskb; 1920 tail = nskb; 1921 1922 nskb->dev = skb->dev; 1923 nskb->priority = skb->priority; 1924 nskb->protocol = skb->protocol; 1925 nskb->dst = dst_clone(skb->dst); 1926 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 1927 nskb->pkt_type = skb->pkt_type; 1928 nskb->mac_len = skb->mac_len; 1929 1930 skb_reserve(nskb, headroom); 1931 nskb->mac.raw = nskb->data; 1932 nskb->nh.raw = nskb->data + skb->mac_len; 1933 nskb->h.raw = nskb->nh.raw + (skb->h.raw - skb->nh.raw); 1934 memcpy(skb_put(nskb, doffset), skb->data, doffset); 1935 1936 if (!sg) { 1937 nskb->csum = skb_copy_and_csum_bits(skb, offset, 1938 skb_put(nskb, len), 1939 len, 0); 1940 continue; 1941 } 1942 1943 frag = skb_shinfo(nskb)->frags; 1944 k = 0; 1945 1946 nskb->ip_summed = CHECKSUM_PARTIAL; 1947 nskb->csum = skb->csum; 1948 memcpy(skb_put(nskb, hsize), skb->data + 
                       offset, hsize);

                while (pos < offset + len) {
                        BUG_ON(i >= nfrags);

                        *frag = skb_shinfo(skb)->frags[i];
                        get_page(frag->page);
                        size = frag->size;

                        if (pos < offset) {
                                frag->page_offset += offset - pos;
                                frag->size -= offset - pos;
                        }

                        k++;

                        if (pos + size <= offset + len) {
                                i++;
                                pos += size;
                        } else {
                                frag->size -= pos + size - (offset + len);
                                break;
                        }

                        frag++;
                }

                skb_shinfo(nskb)->nr_frags = k;
                nskb->data_len = len - hsize;
                nskb->len += nskb->data_len;
                nskb->truesize += nskb->data_len;
        } while ((offset += len) < skb->len);

        return segs;

err:
        while ((skb = segs)) {
                segs = skb->next;
                kfree_skb(skb);
        }
        return ERR_PTR(err);
}

EXPORT_SYMBOL_GPL(skb_segment);

void __init skb_init(void)
{
        skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
                                              sizeof(struct sk_buff),
                                              0,
                                              SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                              NULL, NULL);
        skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
                                                (2*sizeof(struct sk_buff)) +
                                                sizeof(atomic_t),
                                                0,
                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                                NULL, NULL);
}

EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(__alloc_skb);
EXPORT_SYMBOL(__netdev_alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_clone_fraglist);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_prepare_seq_read);
EXPORT_SYMBOL(skb_seq_read);
EXPORT_SYMBOL(skb_abort_seq_read);
EXPORT_SYMBOL(skb_find_text);
EXPORT_SYMBOL(skb_append_datato_frags);
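
A usage sketch, not part of skbuff.c itself: the allocate, reserve, put and clone pattern that the helpers above implement, written against the 2.6.21-era declarations in <linux/skbuff.h>. The function name example_build_and_clone() and its parameters are illustrative assumptions; only alloc_skb(), skb_reserve(), skb_put(), skb_clone() and kfree_skb() are relied on, and only as their kernel-doc above describes.

#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative helper (hypothetical name): allocate an skb with explicit
 * headroom, copy a payload into the linear area, and take a cheap clone. */
static struct sk_buff *example_build_and_clone(const void *payload,
                                               unsigned int len,
                                               unsigned int headroom)
{
        struct sk_buff *skb, *clone;

        /* alloc_skb() wraps __alloc_skb(); the new buffer has no headroom
         * and headroom + len bytes of tailroom. */
        skb = alloc_skb(headroom + len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        /* Reserve headroom up front so a later skb_push() of protocol
         * headers does not end in skb_under_panic(). */
        skb_reserve(skb, headroom);

        /* skb_put() advances the tail (skb_over_panic() fires if the
         * tailroom is exceeded) and returns the old tail to copy into. */
        memcpy(skb_put(skb, len), payload, len);

        /* skb_clone() shares the data area and bumps dataref; a private,
         * writable copy would need skb_copy() or pskb_copy() instead. */
        clone = skb_clone(skb, GFP_ATOMIC);
        if (!clone) {
                kfree_skb(skb);
                return NULL;
        }

        /* Drop the clone's reference again; the original is handed back. */
        kfree_skb(clone);
        return skb;
}

With plain alloc_skb() the clone above comes from skbuff_head_cache; allocating with alloc_skb_fclone() instead would let skb_clone() take the fast path through the companion sk_buff in skbuff_fclone_cache.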