/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/poll.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/dmaengine.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */

#define CHECKSUM_NONE		0
#define CHECKSUM_PARTIAL	1
#define CHECKSUM_UNNECESSARY	2
#define CHECKSUM_COMPLETE	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
				  sizeof(struct skb_shared_info)) & \
				  ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and claims to have verified
 *		the checksum. skb->csum is undefined.
 *		This is a bad option, but, unfortunately, many vendors do it.
 *		Apparently with the secret goal of selling you a new device
 *		when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied the checksum of the
 *		_whole_ packet as seen by netif_rx in skb->csum.
 *		NOTE: even if the device supports only some protocols but is
 *		able to produce some skb->csum, it MUST use COMPLETE,
 *		not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *		hard_start_xmit from skb->h.raw to the end and to record
 *		the checksum at skb->h.raw + skb->csum.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time:
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- device is dumb, it is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for some unknown reason. Though, see the
 *			  comment above about CHECKSUM_UNNECESSARY. 8)
 *
 *	Any questions? No questions, good.		--ANK
 */
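/*
 * Editor's sketch (hypothetical, not part of the original header) of how a
 * driver applies the receive rules above. The MY_HW_CSUM_COMPLETE flag and
 * my_read_hw_csum() helper are invented for illustration only.
 *
 *	static void example_rx(struct net_device *dev, struct sk_buff *skb,
 *			       u32 hw_flags)
 *	{
 *		if (hw_flags & MY_HW_CSUM_COMPLETE) {
 *			// Device checksummed the whole packet.
 *			skb->csum = my_read_hw_csum(dev);
 *			skb->ip_summed = CHECKSUM_COMPLETE;
 *		} else {
 *			// Play it safe: let the stack verify in software.
 *			skb->ip_summed = CHECKSUM_NONE;
 *		}
 *		netif_rx(skb);
 *	}
 */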
struct net_device;

#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	unsigned int	ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves. The higher 16 bits hold references
 * to the payload part of skb->data. The lower 16 bits hold references to
 * the entire skb->data. It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
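/*
 * Editor's sketch (illustration only, not part of the original header) of
 * how the packed dataref is decoded; skb_cloned() and skb_header_cloned()
 * below apply exactly this arithmetic.
 *
 *	int dataref = atomic_read(&skb_shinfo(skb)->dataref);
 *	int total_refs   = dataref & SKB_DATAREF_MASK;	 // refs on all of skb->data
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT; // refs on the payload only
 *	int header_refs  = total_refs - payload_refs;	 // users who may touch the header
 */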
struct skb_timeval {
	u32 off_sec;
	u32 off_usec;
};


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@input_dev: Device we arrived on
 *	@h: Transport layer header
 *	@nh: Network layer header
 *	@mac: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@nfmark: Can be used for communication between hooks
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	struct skb_timeval	tstamp;
	struct net_device	*dev;
	struct net_device	*input_dev;

	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	union {
		unsigned char	*raw;
	} mac;

	struct dst_entry	*dst;
	struct sec_path		*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len,
				mac_len,
				csum;
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
	__u32			nfmark;
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif


	/* These elements must be at the end, see alloc_skb() for details. */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};
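/*
 * Editor's sketch of the usual pattern for the control buffer above: a layer
 * overlays its own private struct on skb->cb. The names here are
 * hypothetical; real users (e.g. struct tcp_skb_cb in net/tcp.h) look just
 * like this.
 *
 *	struct my_layer_cb {
 *		u32 seq;
 *		u8  flags;
 *	};
 *	#define MY_LAYER_CB(skb) ((struct my_layer_cb *)&((skb)->cb[0]))
 *
 *	// The overlay must never exceed the 48 bytes of cb[];
 *	// BUILD_BUG_ON(sizeof(struct my_layer_cb) > sizeof(skb->cb))
 *	// is the usual compile-time guard.
 *	MY_LAYER_CB(skb)->seq = 1;
 */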
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void	       kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1);
}

extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
					    unsigned int size,
					    gfp_t priority);
extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	       skb_over_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	       skb_under_panic(struct sk_buff *skb, int len,
				       void *here);
extern void	       skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
				   int getfrag(void *from, char *to, int offset,
					       int len, int odd, struct sk_buff *skb),
				   void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);
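/*
 * Editor's sketch (illustration only) of the sequential-read API above: walk
 * an skb chunk by chunk without linearizing it. The loop shape follows the
 * contract of skb_seq_read(): it returns the length of the block mapped at
 * *data, and 0 once the requested range is exhausted.
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		// ... process len bytes at data ...
 *		consumed += len;
 *	}
 *	// call skb_abort_seq_read(&st) only if stopping before the end
 */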
extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

/* Internal */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant atomic
 * changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer. This is done
 *	by acquiring a payload reference. You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
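/*
 * Editor's sketch (illustration only) of the usual skb_share_check() call
 * site in a hypothetical receive handler: take ownership of a private skb
 * before touching it.
 *
 *	static int example_rcv(struct sk_buff *skb)
 *	{
 *		skb = skb_share_check(skb, GFP_ATOMIC);
 *		if (!skb)
 *			return 0;	// allocation failed; reference dropped
 *		// safe to modify the skb itself now (but not necessarily
 *		// its data: see skb_cloned() above and skb_unshare() below)
 *		...
 *	}
 */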
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of a queue
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of a queue
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers will need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
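/*
 * Editor's sketch (illustration only) of the intended locking split: the
 * plain skb_queue_*()/skb_dequeue() helpers take list->lock themselves,
 * while the __skb_*() variants below leave locking to the caller.
 *
 *	struct sk_buff_head q;
 *	unsigned long flags;
 *
 *	skb_queue_head_init(&q);
 *	skb_queue_tail(&q, skb);		// locks internally
 *
 *	spin_lock_irqsave(&q.lock, flags);	// or manage the lock yourself
 *	skb = __skb_dequeue(&q);
 *	spin_unlock_irqrestore(&q.lock, flags);
 */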
/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *)list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result = next;
		next = next->next;
		list->qlen--;
		next->prev = prev;
		prev->next = next;
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}

/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */
extern void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next = skb->next;
	prev = skb->prev;
	skb->next = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page = page;
	frag->page_offset = off;
	frag->size = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}
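/*
 * Editor's sketch (illustration only) of building a nonlinear skb with
 * skb_fill_page_desc(): each call appends one page fragment, and the caller
 * is responsible for the length and memory accounting.
 *
 *	skb_fill_page_desc(skb, 0, page, offset, size);
 *	skb->len      += size;
 *	skb->data_len += size;
 *	skb->truesize += size;	// charge the owner for the extra memory
 */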
#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}

static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
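/*
 * Editor's sketch (illustration only) of the standard header-parsing idiom:
 * make sure the bytes you are about to read are in the linear area before
 * dereferencing them.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;			// packet too short / no memory
 *	iph = (struct iphdr *)skb->data;	// now safe to read
 */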
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an &sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}
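/*
 * Editor's sketch (illustration only) of the canonical way a packet is built
 * with the helpers above: reserve room for headers, copy in the payload with
 * skb_put(), then prepend each header with skb_push(). MAX_HDR_LEN is a
 * hypothetical constant.
 *
 *	skb = alloc_skb(MAX_HDR_LEN + payload_len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, MAX_HDR_LEN);		// headroom for the headers
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	uh = (struct udphdr *)skb_push(skb, sizeof(*uh));  // prepend header
 */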
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom, you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif
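/*
 * Editor's sketch (illustration only) of a driver receive-buffer allocation
 * that applies NET_IP_ALIGN as described above; dev_alloc_skb() (defined
 * later in this file) already adds NET_SKB_PAD of headroom on top.
 *
 *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
 *	if (!skb)
 *		goto drop;
 *	skb_reserve(skb, NET_IP_ALIGN);	// IP header lands on a 4-byte boundary
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 */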
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len  = len;
	skb->tail = skb->data + len;
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
					  unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is an skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
			skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + (NET_SKB_PAD - 1)) &
					~(NET_SKB_PAD - 1), 0, GFP_ATOMIC);
	return 0;
}
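/*
 * Editor's sketch (illustration only) of a typical skb_cow() call site: make
 * the buffer private and guarantee headroom before rewriting a header, as a
 * forwarding path would.
 *
 *	if (skb_cow(skb, ETH_HLEN))	// non-zero means reallocation failed
 *		goto drop;
 *	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);	// now safe to write
 */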
/**
 *	skb_padto - pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		unsigned int csum = csum_and_copy_from_user(from,
							    skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
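/*
 * Editor's sketch (illustration only) of the pull-then-fix-checksum pattern
 * the comment above describes; skb_pull_rcsum() below wraps this sequence.
 *
 *	unsigned char *start = skb->data;	// data before the pull
 *	__skb_pull(skb, hlen);
 *	skb_postpull_rcsum(skb, start, hlen);	// keep CHECKSUM_COMPLETE valid
 */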
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
#ifdef CONFIG_HIGHMEM
	BUG_ON(in_irq());

	local_bh_disable();
#endif
	return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
}

static inline void kunmap_skb_frag(void *vaddr)
{
	kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
#ifdef CONFIG_HIGHMEM
	local_bh_enable();
#endif
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next; \
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue)); \
		     skb = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev; \
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue)); \
		     skb = skb->prev)


extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern unsigned int    skb_checksum(const struct sk_buff *skb, int offset,
				    int len, unsigned int csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(const struct sk_buff *skb, int offset,
				      void *from, int len);
extern unsigned int    skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      unsigned int csum);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
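/*
 * Editor's sketch (illustration only) of the skb_header_pointer() idiom used
 * throughout netfilter: read a header that may live in the linear area or in
 * fragments, without modifying the skb.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, thoff, sizeof(_tcph), &_tcph);
 *	if (th == NULL)
 *		goto drop;	// truncated packet
 *	// th points either into skb->data or at the _tcph copy
 */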
extern void skb_init(void);
extern void skb_add_mtu(int mtu);

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	stamp->tv_sec  = skb->tstamp.off_sec;
	stamp->tv_usec = skb->tstamp.off_usec;
}

/**
 *	skb_set_timestamp - set timestamp of a skb
 *	@skb: skb to set stamp of
 *	@stamp: pointer to struct timeval to get stamp from
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts a struct timeval to an offset and stores
 *	it in the skb.
 */
static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
{
	skb->tstamp.off_sec  = stamp->tv_sec;
	skb->tstamp.off_usec = stamp->tv_usec;
}

extern void __net_timestamp(struct sk_buff *skb);

extern unsigned int __skb_checksum_complete(struct sk_buff *skb);

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum. The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP. It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets. In that case the function should return zero if the
 *	checksum is correct. In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
	       __skb_checksum_complete(skb);
}
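/*
 * Editor's sketch (illustration only) of how a datagram protocol might
 * verify a received checksum with the helper above: seed skb->csum with the
 * pseudo-header sum, then let skb_checksum_complete() fold in the payload.
 *
 *	skb->csum = csum_tcpudp_nofold(saddr, daddr, skb->len,
 *				       IPPROTO_UDP, 0);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;	// non-zero result: bad checksum
 */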
#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nfct->destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */