/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>

#define HAVE_ALLOC_SKB		/* For the drivers to know */
#define HAVE_ALIGNABLE_SKB	/* Ditto 8)		   */

#define CHECKSUM_NONE 0
#define CHECKSUM_PARTIAL 1
#define CHECKSUM_UNNECESSARY 2
#define CHECKSUM_COMPLETE 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_ORDER(X, ORDER)	(((PAGE_SIZE << (ORDER)) - (X) - \
				  sizeof(struct skb_shared_info)) & \
				  ~(SMP_CACHE_BYTES - 1))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *		This is a bad option, but unfortunately many vendors do it,
 *		apparently with the secret goal of selling you a new device
 *		when you add a new protocol to your host. F.e. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	from skb->h.raw to the end and to record the checksum
 *	at skb->h.raw+skb->csum.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- device is dumb, able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *
 *	Any questions? No questions, good.		--ANK
 */
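/*
 * Illustrative sketch (not part of the original header): a driver whose
 * hardware checksums the whole frame would typically report it as below,
 * assuming the hardware-provided sum is in a local variable hw_csum:
 *
 *	skb->ip_summed = CHECKSUM_COMPLETE;
 *	skb->csum = hw_csum;
 *
 * A driver with no checksum offload simply leaves skb->ip_summed as
 * CHECKSUM_NONE and lets the stack verify the checksum in software.
 */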
struct net_device;

#ifdef CONFIG_NETFILTER
struct nf_conntrack {
	atomic_t use;
	void (*destroy)(struct nf_conntrack *);
};

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
	struct net_device *netoutdev;
#endif
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list */
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
	__u16 page_offset;
	__u16 size;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	atomic_t	dataref;
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	struct sk_buff	*frag_list;
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  It is up to the users of the skb to agree on
 * where the payload starts.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

struct skb_timeval {
	u32	off_sec;
	u32	off_usec;
};


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,
};
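/*
 * Illustrative sketch (not part of the original header): the paged data of
 * a nonlinear skb can be walked through skb_shinfo(), e.g. to sum up the
 * bytes held in page fragments:
 *
 *	unsigned int i, paged = 0;
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 *		paged += skb_shinfo(skb)->frags[i].size;
 *
 * skb_pagelen(), declared further down, performs essentially this
 * computation plus the linear head length.
 */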
/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@iif: ifindex of device we arrived on
 *	@h: Transport layer header
 *	@nh: Network layer header
 *	@mac: Link layer header
 *	@dst: destination entry
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Length of the nonlinear (paged) part of the data
 *	@mac_len: Length of link layer header
 *	@csum: Checksum
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	struct sock		*sk;
	struct skb_timeval	tstamp;
	struct net_device	*dev;
	int			iif;
	/* 4 byte hole on 64 bit*/

	union {
		struct tcphdr	*th;
		struct udphdr	*uh;
		struct icmphdr	*icmph;
		struct igmphdr	*igmph;
		struct iphdr	*ipiph;
		struct ipv6hdr	*ipv6h;
		unsigned char	*raw;
	} h;

	union {
		struct iphdr	*iph;
		struct ipv6hdr	*ipv6h;
		struct arphdr	*arph;
		unsigned char	*raw;
	} nh;

	union {
		unsigned char	*raw;
	} mac;

	struct  dst_entry	*dst;
	struct	sec_path	*sp;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48];

	unsigned int		len,
				data_len,
				mac_len;
	union {
		__wsum		csum;
		__u32		csum_offset;
	};
	__u32			priority;
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1;
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#ifdef CONFIG_NETFILTER
	struct nf_conntrack	*nfct;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif
#endif /* CONFIG_NETFILTER */
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif
#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif

	__u32			mark;

	/* These elements must be at the end, see alloc_skb() for details. */
	unsigned int		truesize;
	atomic_t		users;
	unsigned char		*head,
				*data,
				*tail,
				*end;
};
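/*
 * Illustrative sketch (not part of the original header): once the network
 * layer has set skb->nh and skb->h, protocol code typically reads headers
 * through these unions rather than casting skb->data directly:
 *
 *	struct iphdr *iph = skb->nh.iph;
 *	struct tcphdr *th = skb->h.th;
 *
 *	if (iph->protocol == IPPROTO_TCP)
 *		... inspect th->source, th->dest ...
 */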
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

extern void kfree_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, -1);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, -1);
}

extern void	       kfree_skbmem(struct sk_buff *skb);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	kfree_skb(a)
extern void	       skb_over_panic(struct sk_buff *skb, int len,
				      void *here);
extern void	       skb_under_panic(struct sk_buff *skb, int len,
				       void *here);
extern void	       skb_truesize_bug(struct sk_buff *skb);

static inline void skb_truesize_check(struct sk_buff *skb)
{
	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
		skb_truesize_bug(skb);
}

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state
{
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

/* Internal */
#define skb_shinfo(SKB)		((struct skb_shared_info *)((SKB)->end))

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}
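/*
 * Illustrative sketch (not part of the original header): a reference taken
 * with skb_get() must later be dropped with kfree_skb(), which only frees
 * the buffer once the last reference is gone:
 *
 *	struct sk_buff *ref = skb_get(skb);
 *	...
 *	kfree_skb(ref);
 */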
/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt state or with spinlocks held @pri must
 *	be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
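/*
 * Illustrative sketch (not part of the original header): a protocol
 * receive handler that intends to modify the skb typically starts with:
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *
 * After this the skb itself is not shared, though its data may still be
 * cloned (see skb_unshare() below for the data side).
 */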
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of a queue
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of a queue
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should be annotated to consolidate the
 * main types of usage into three classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
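/*
 * Illustrative sketch (not part of the original header): a driver keeping
 * a private queue initialises it once and may then use the locked queue
 * operations declared below from any context:
 *
 *	struct sk_buff_head txq;
 *
 *	skb_queue_head_init(&txq);
 *	skb_queue_tail(&txq, skb);	(takes txq.lock itself)
 *	skb = skb_dequeue(&txq);	(returns NULL when empty)
 */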
/*
 *	Insert an sk_buff at the start of a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	struct sk_buff *next;
	list->qlen++;

	next = prev->next;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	struct sk_buff *prev, *next;

	list->qlen++;
	next = (struct sk_buff *)list;
	prev = next->prev;
	newsk->next = next;
	newsk->prev = prev;
	next->prev = prev->next = newsk;
}


/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *next, *prev, *result;

	prev = (struct sk_buff *) list;
	next = prev->next;
	result = NULL;
	if (next != prev) {
		result	     = next;
		next	     = next->next;
		list->qlen--;
		next->prev   = prev;
		prev->next   = next;
		result->next = result->prev = NULL;
	}
	return result;
}


/*
 *	Insert a packet on a list.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

/*
 *	Place a packet after a given packet in a list.
 */
extern void	   skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	__skb_insert(newsk, old, old->next, list);
}
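/*
 * Illustrative sketch (not part of the original header): the unlocked
 * __skb_*() queue operations must run under the queue's own lock (or on
 * a queue nobody else can see), e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&q->lock, flags);
 *	__skb_queue_tail(q, skb);
 *	spin_unlock_irqrestore(&q->lock, flags);
 *
 * which is essentially what the locked skb_queue_tail() does.
 */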
/*
 *	Remove an sk_buff from a list. _Must_ be called atomically, and with
 *	the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}


/* XXX: more streamlined implementation */

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->frag_list)
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

/*
 *	Add data to an sk_buff
 */
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb->tail;
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, current_text_addr());
	return tmp;
}
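/*
 * Illustrative sketch (not part of the original header): filling a freshly
 * allocated linear skb, assuming a local buffer buf of length len:
 *
 *	skb = alloc_skb(len + NET_SKB_PAD, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD);		(leave headroom, see below)
 *	memcpy(skb_put(skb, len), buf, len);	(append len bytes)
 */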
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, current_text_addr());
	return skb->data;
}

static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an &sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
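/*
 * Illustrative sketch (not part of the original header): before reading a
 * header from skb->data, a parser makes sure that many bytes are linear:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;	(truncated packet)
 *	iph = (struct iphdr *)skb->data;
 */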
/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 16 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 16 bytes of
 * headroom; you should not reduce this.
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	16
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb->data_len)) {
		WARN_ON(1);
		return;
	}
	skb->len  = len;
	skb->tail = skb->data + len;
}

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
static inline void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}


static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}
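/*
 * Illustrative sketch (not part of the original header): trimming trailing
 * link-layer padding from a received frame down to the length the IP
 * header claims, assuming tot_len has already been validated:
 *
 *	if (pskb_trim(skb, ntohs(iph->tot_len)))
 *		goto drop;	(may fail with -ENOMEM on cloned skbs)
 *
 * For received packets with a CHECKSUM_COMPLETE sum, pskb_trim_rcsum()
 * below should be used instead so the checksum state stays valid.
 */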
/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

/**
 *	dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return __dev_alloc_skb(length, GFP_ATOMIC);
}

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
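/*
 * Illustrative sketch (not part of the original header): a typical receive
 * path combines the allocator with NET_IP_ALIGN so the IP header lands on
 * a 4-byte boundary; frame_len and rx_buf are hypothetical driver locals:
 *
 *	skb = netdev_alloc_skb(dev, frame_len + NET_IP_ALIGN);
 *	if (!skb)
 *		goto drop;
 *	skb_reserve(skb, NET_IP_ALIGN);
 *	memcpy(skb_put(skb, frame_len), rx_buf, frame_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */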
/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
			skb_headroom(skb);

	if (delta < 0)
		delta = 0;

	if (delta || skb_cloned(skb))
		return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
				~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
	return 0;
}

/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}
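/*
 * Illustrative sketch (not part of the original header): prepending a
 * header to a packet that may be cloned or short on headroom, for a
 * hypothetical header size hdr_len:
 *
 *	if (skb_cow(skb, hdr_len))
 *		goto drop;		(-ENOMEM)
 *	hdr = skb_push(skb, hdr_len);
 *	... fill in hdr_len bytes at hdr ...
 */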
/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     prefetch(skb->next), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     prefetch(skb->prev), (skb != (struct sk_buff *)(queue));	\
		     skb = skb->prev)


extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(const struct sk_buff *skb, int offset,
				      void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);

extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

extern void skb_init(void);
extern void skb_add_mtu(int mtu);
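/*
 * Illustrative sketch (not part of the original header):
 * skb_header_pointer() reads a header regardless of how the skb data is
 * laid out, copying into a stack buffer only when the bytes are not
 * linear:
 *
 *	struct tcphdr _th, *th;
 *
 *	th = skb_header_pointer(skb, offset, sizeof(_th), &_th);
 *	if (!th)
 *		goto drop;	(packet too short)
 *	... th is valid whether or not a copy was made ...
 */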
/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb, struct timeval *stamp)
{
	stamp->tv_sec  = skb->tstamp.off_sec;
	stamp->tv_usec = skb->tstamp.off_usec;
}

/**
 *	skb_set_timestamp - set timestamp of a skb
 *	@skb: skb to set stamp of
 *	@stamp: pointer to struct timeval to get stamp from
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts a struct timeval to an offset and stores
 *	it in the skb.
 */
static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *stamp)
{
	skb->tstamp.off_sec  = stamp->tv_sec;
	skb->tstamp.off_usec = stamp->tv_usec;
}

extern void __net_timestamp(struct sk_buff *skb);

extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__skb_checksum_complete(skb);
}
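/*
 * Illustrative sketch (not part of the original header): a UDP-like
 * receive path would seed skb->csum with the pseudo-header sum and then
 * verify the whole packet:
 *
 *	skb->csum = csum_tcpudp_nofold(saddr, daddr, skb->len,
 *				       IPPROTO_UDP, 0);
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;	(non-zero means bad checksum)
 */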
#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nfct->destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

#else /* CONFIG_NETFILTER */
static inline void nf_reset(struct sk_buff *skb) {}
#endif /* CONFIG_NETFILTER */

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */