/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and claims to have verified
 *		the checksum. skb->csum is undefined.
 *	      It is a bad option, but, unfortunately, many vendors do this.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY, except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol, or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *	    hard_start_xmit from skb->csum_start to the end and to record
 *	    the checksum at skb->csum_start + skb->csum_offset.
 *
 *	The device must advertise its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	  - it is a clever device, it is able to checksum
 *			    everything.
 *	NETIF_F_IP_CSUM   - the device is dumb. It is able to csum only
 *			    TCP/UDP over IPv4. Sigh. Vendors like this
 *			    way for an unknown reason. Though, see the
 *			    comment above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6
 *			    instead.
 *
 *	UNNECESSARY: device will do per-protocol-specific csum. Protocol
 *	drivers that do not want the stack to perform the checksum
 *	calculation should use this flag in their outgoing skbs.
 *	NETIF_F_FCOE_CRC  - this indicates the device can do the FCoE FC
 *			    CRC offload. Correspondingly, the FCoE protocol
 *			    driver stack should use CHECKSUM_UNNECESSARY.
 *
 *	Any questions? No questions, good.		--ANK
 */
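/* Illustrative sketch (guarded out, not part of this header): what a
 * software fallback does for a CHECKSUM_PARTIAL skb the device cannot
 * checksum, in the spirit of skb_checksum_help().  The checksum is
 * computed from csum_start to the end of the packet and stored at
 * csum_start + skb->csum_offset.  skb_checksum() and
 * skb_checksum_start_offset() are declared further down in this file;
 * csum_fold() comes from <net/checksum.h>.
 */
#if 0
static void example_fill_partial_csum(struct sk_buff *skb)
{
	int start = skb_checksum_start_offset(skb);	/* offset from skb->data */
	__wsum csum = skb_checksum(skb, start, skb->len - start, 0);

	/* fold to 16 bits and store where the protocol asked for it */
	*(__sum16 *)(skb->data + start + skb->csum_offset) = csum_fold(csum);
}
#endif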
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned int		mask;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};
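/* Illustrative sketch (guarded out): how a hardware-time-stamping driver
 * might fill this structure on receive.  read_hw_clock_ns() and
 * hw_to_sys_ns() are hypothetical driver helpers; skb_hwtstamps() is
 * defined further down in this file and ns_to_ktime() comes from
 * <linux/ktime.h>.
 */
#if 0
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
	u64 ns = read_hw_clock_ns(dev);			/* hypothetical */

	hwts->hwtstamp  = ns_to_ktime(ns);
	hwts->syststamp = ns_to_ktime(hw_to_sys_ns(ns));	/* hypothetical */
#endif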
/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* ensure the originating sk reference is available on driver level */
	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 4,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 5,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done
 * in the lower device; the skb's last reference should be 0 when it is
 * called.  The desc field is used to track the userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(void *);
	void *arg;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32		ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until the skb destructor is called. */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
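/* Illustrative sketch (guarded out): decoding dataref into its two
 * halves, exactly as skb_cloned() and skb_header_cloned() below do.
 */
#if 0
	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
	int payload_refs = dataref >> SKB_DATAREF_SHIFT;  /* payload only */
	int total_refs   = dataref & SKB_DATAREF_MASK;    /* entire skb->data */
#endif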
enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_tci: vlan tag control information
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;

	__u32			rxhash;

	__u16			vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	/* 9/11 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
		__u32		avail_size;
	};

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};
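/* Illustrative sketch (guarded out) of the usual skb->cb idiom: a layer
 * overlays its own struct on the 48-byte control buffer, typically behind
 * an accessor macro (TCP does this via TCP_SKB_CB()).  The names below
 * are hypothetical; the BUILD_BUG_ON guards the size limit.
 */
#if 0
struct example_cb {
	__u32 seq;
	__u32 end_seq;
	__u8  flags;
};
#define EXAMPLE_CB(skb)	((struct example_cb *)&((skb)->cb[0]))

	BUILD_BUG_ON(sizeof(struct example_cb) >
		     sizeof(((struct sk_buff *)0)->cb));
	EXAMPLE_CB(skb)->seq = 1;
#endif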
#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}
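/* Illustrative sketch (guarded out): attaching a dst the caller holds a
 * reference on.  dst_hold() and skb_dst_drop() live in <net/dst.h>; the
 * reference taken here is later released by skb_dst_drop() when the skb
 * is freed or re-routed.
 */
#if 0
	dst_hold(dst);			/* reference now owned by the skb */
	skb_dst_set(skb, dst);
#endif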
extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
extern struct sk_buff *build_skb(void *data);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);
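/* Illustrative sketch (guarded out) of the sequential reader above: walk
 * all of an skb's data, linear part and frags alike, one mapped chunk at
 * a time.  A return of 0 means the read is complete; skb_abort_seq_read()
 * is only needed when stopping early.
 */
#if 0
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* process len bytes at data */
		consumed += len;
	}
#endif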
extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}
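/* Illustrative sketch (guarded out): iterating a queue in place with the
 * helpers above instead of the skb_queue_walk() macros defined near the
 * end of this file.  skb_peek() is defined further down; the appropriate
 * queue lock must be held throughout, and inspect() is hypothetical.
 */
#if 0
	struct sk_buff *skb = skb_peek(&queue);

	while (skb) {
		inspect(skb);		/* skb stays on the queue */
		skb = skb_queue_is_last(&queue, skb) ? NULL :
						skb_queue_next(&queue, skb);
	}
#endif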
/*
 * If users == 1, we are the only owner and can avoid a redundant
 * atomic change.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
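/* Illustrative sketch (guarded out): the typical receive-path use of
 * skb_share_check() before a handler modifies the skb.
 */
#if 0
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NET_RX_DROP;	/* clone failed; our reference was dropped */
	/* we now hold the only user reference and may modify the skb */
#endif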
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt context, @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *list = ((const struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;
	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *list = ((const struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}
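/* Illustrative sketch (guarded out): peeking safely, per the warnings
 * above, by holding the queue lock so nobody can run off with the skb
 * while we look at it.  inspect() is hypothetical.
 */
#if 0
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&queue.lock, flags);
	skb = skb_peek(&queue);
	if (skb)
		inspect(skb);		/* skb stays on the queue */
	spin_unlock_irqrestore(&queue.lock, flags);
#endif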
/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows to initialize the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or the drivers will need annotation so that the main
 * types of usage can be consolidated into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk,
			      struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
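/* Illustrative sketch (guarded out): the common bulk-dequeue pattern
 * built on the splice helpers above; grab everything under the lock,
 * then process the private list without holding it.
 */
#if 0
	struct sk_buff_head tmp;
	unsigned long flags;

	__skb_queue_head_init(&tmp);
	spin_lock_irqsave(&queue.lock, flags);
	skb_queue_splice_tail_init(&queue, &tmp);	/* queue is now empty */
	spin_unlock_irqrestore(&queue.lock, flags);
	/* process tmp lock-free */
#endif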
/**
 *	__skb_queue_after - queue a buffer after the given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
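/* Illustrative sketch (guarded out): draining a private (unlocked) list
 * with __skb_dequeue(); compare __skb_queue_purge() further down, which
 * does the same but frees each buffer.  deliver() is hypothetical.
 */
#if 0
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&tmp)) != NULL)
		deliver(skb);
#endif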
/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p	  = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition it updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
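/* Illustrative sketch (guarded out): attaching one page of data as the
 * first fragment.  The caller supplies the page reference
 * (skb_fill_page_desc() takes none) and must account the added bytes in
 * len, data_len and truesize itself.
 */
#if 0
	get_page(page);
	skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
	skb->len      += PAGE_SIZE;
	skb->data_len += PAGE_SIZE;
	skb->truesize += PAGE_SIZE;
#endif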
extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size, unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
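/* Illustrative sketch (guarded out): the canonical build-and-parse
 * patterns for the helpers above.  ETH_HLEN and struct ethhdr come from
 * <linux/if_ether.h>; alloc_skb() and skb_reserve() are declared
 * elsewhere in this file; eth_hdr_bytes/payload are hypothetical.
 */
#if 0
	/* building: reserve headroom, append payload, prepend the header */
	skb = alloc_skb(ETH_HLEN + payload_len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, ETH_HLEN);
	memcpy(skb_put(skb, payload_len), payload, payload_len);
	memcpy(skb_push(skb, ETH_HLEN), eth_hdr_bytes, ETH_HLEN);

	/* parsing: ensure header bytes are linear before dereferencing */
	if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
		goto drop;
#endif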
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}
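/* Illustrative sketch (guarded out): guarding an append with
 * skb_tailroom() so __skb_put() can never run past skb->end.
 */
#if 0
	if (skb_tailroom(skb) < len)
		goto no_room;
	memcpy(__skb_put(skb, len), data, len);
#endif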
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}
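/* Illustrative sketch (guarded out): recording header offsets while an
 * rx path walks inwards through a frame, in the style of
 * eth_type_trans().  ETH_HLEN comes from <linux/if_ether.h>.
 */
#if 0
	skb_reset_mac_header(skb);	/* skb->data is at the MAC header */
	skb_pull(skb, ETH_HLEN);
	skb_reset_network_header(skb);	/* skb->data is now at the IP header */
#endif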
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus() for example only accesses one 64-byte aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to
 *	out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
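/* Illustrative sketch (guarded out): trimming a trailer (padding, FCS)
 * off a received packet with pskb_trim(); it can fail on cloned skbs
 * because the data may have to be reallocated.  trailer_len is
 * hypothetical.
 */
#if 0
	if (pskb_trim(skb, skb->len - trailer_len))
		goto drop;
#endif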
/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
					  unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}
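/* Illustrative sketch (guarded out): the usual rx buffer refill in a NIC
 * driver; the _ip_align variant leaves NET_IP_ALIGN bytes of headroom so
 * the IP header lands aligned after the 14-byte ethernet header.
 * rx_buf_len is hypothetical.
 */
#if 0
	skb = netdev_alloc_skb_ip_align(dev, rx_buf_len);
	if (unlikely(!skb))
		return -ENOMEM;		/* retry the refill later */
#endif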
/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (%DMA_*)
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
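/* Illustrative sketch (guarded out): a transmit path mapping every paged
 * fragment of an skb for device DMA and checking each mapping.
 */
#if 0
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t addr = skb_frag_dma_map(dev, frag, 0,
						   skb_frag_size(frag),
						   DMA_TO_DEVICE);

		if (dma_mapping_error(dev, addr))
			goto unmap_previous;	/* hypothetical unwind label */
	}
#endif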
/**
 * skb_cow - copy header of skb when it is required
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * If the skb passed lacks sufficient headroom or its data part
 * is shared, data is reallocated. If reallocation fails, an error
 * is returned and the original skb is not changed.
 *
 * The result is skb with writable area skb->head...skb->tail
 * and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 * skb_cow_head - skb_cow but only making the head writable
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * This function is identical to skb_cow except that we replace the
 * skb_cloned check by skb_header_cloned. It should be used when
 * you only need to push on some header and do not need to modify
 * the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 * skb_padto - pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error.
 */
static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_linearize_cow - make sure skb is linear and writable
 * @skb: buffer to process
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}
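
/*
 * Illustrative sketch (not part of this header): a hypothetical
 * encapsulation path that pushes a 4-byte tag in front of the packet.
 * skb_cow_head() guarantees writable headroom before skb_push() is
 * called, copying the head only when it is cloned or too small:
 *
 *	static int my_push_tag(struct sk_buff *skb, __be32 tag)
 *	{
 *		__be32 *p;
 *
 *		if (skb_cow_head(skb, sizeof(tag)))
 *			return -ENOMEM;
 *		p = (__be32 *)skb_push(skb, sizeof(tag));
 *		*p = tag;
 *		return 0;
 *	}
 */
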
/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 * CHECKSUM_NONE so that it can be recomputed from scratch.
 */
static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets is still valid after the operation.
 */
static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
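
/*
 * Illustrative sketch (not part of this header): summing queued bytes
 * with skb_queue_walk().  The caller is assumed to hold whatever lock
 * protects the queue; the plain walk macros are not safe against
 * concurrent unlinking (use the _safe variants if the walk itself
 * removes entries):
 *
 *	static unsigned int my_queue_bytes(struct sk_buff_head *list)
 *	{
 *		struct sk_buff *skb;
 *		unsigned int bytes = 0;
 *
 *		skb_queue_walk(list, skb)
 *			bytes += skb->len;
 *		return bytes;
 *	}
 */
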
extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *off, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
						    int offset,
						    const struct iovec *from,
						    int from_offset,
						    int len);
extern int	       skb_copy_datagram_const_iovec(const struct sk_buff *from,
						     int offset,
						     const struct iovec *to,
						     int to_offset,
						     int size);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_free_datagram_locked(struct sock *sk,
						struct sk_buff *skb);
extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(struct sk_buff *skb, int offset,
				      const void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern int	       skb_splice_bits(struct sk_buff *skb,
				       unsigned int offset,
				       struct pipe_inode_info *pipe,
				       unsigned int len,
				       unsigned int flags);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);
extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
				 int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb,
				   netdev_features_t features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}
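
/*
 * Illustrative sketch (not part of this header): safely reading a TCP
 * header that may be split between the linear area and paged fragments
 * (struct tcphdr comes from <linux/tcp.h>).  skb_header_pointer()
 * returns a pointer into skb->data when the bytes are already linear,
 * and otherwise copies them into the caller-supplied buffer:
 *
 *	static const struct tcphdr *my_get_tcp(const struct sk_buff *skb,
 *					       int thoff, struct tcphdr *buf)
 *	{
 *		return skb_header_pointer(skb, thoff, sizeof(*buf), buf);
 *	}
 *
 * The returned pointer is NULL if fewer than sizeof(*buf) bytes exist
 * at @thoff.
 */
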
/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb: the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket. Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}
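
/*
 * Illustrative sketch (not part of this header): a hypothetical driver
 * TX-completion handler reporting a hardware timestamp via
 * skb_tstamp_tx().  It assumes the xmit path set SKBTX_IN_PROGRESS
 * when it requested hardware timestamping, and that the driver
 * provides my_cycles_to_ktime() (hypothetical) to convert its raw
 * counter into a ktime_t:
 *
 *	static void my_tx_complete(struct sk_buff *skb, u64 hw_cycles)
 *	{
 *		struct skb_shared_hwtstamps hwts;
 *
 *		if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
 *			memset(&hwts, 0, sizeof(hwts));
 *			hwts.hwtstamp = my_cycles_to_ktime(hw_cycles);
 *			skb_tstamp_tx(skb, &hwts);
 *		}
 *		dev_kfree_skb_any(skb);
 *	}
 */
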
/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum. The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP. It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets. In that case the function should return zero if the
 * checksum is correct. In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}
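
/*
 * Illustrative sketch (not part of this header): verifying the
 * checksum in a hypothetical receive handler for a protocol whose
 * checksum covers the whole packet (e.g. ICMP, where skb->csum need
 * not hold a pseudo-header sum).  When the device already validated
 * the packet (CHECKSUM_UNNECESSARY) this costs nothing; otherwise the
 * packet is summed in software.  my_process() is hypothetical:
 *
 *	static int my_rcv(struct sk_buff *skb)
 *	{
 *		if (skb_checksum_complete(skb)) {
 *			kfree_skb(skb);
 *			return -EINVAL;
 *		}
 *		return my_process(skb);
 *	}
 */
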
/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}
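
/*
 * Illustrative sketch (not part of this header): a hypothetical
 * forwarding path dropping LRO-merged packets, which may exceed the
 * output device's MTU and cannot be resegmented, then clearing any
 * CHECKSUM_COMPLETE state before transmit.  my_xmit() is hypothetical:
 *
 *	static int my_forward(struct sk_buff *skb, struct net_device *out)
 *	{
 *		if (skb_warn_if_lro(skb)) {
 *			kfree_skb(skb);
 *			return NET_RX_DROP;
 *		}
 *		skb_forward_csum(skb);
 *		return my_xmit(out, skb);
 *	}
 */
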
static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one. Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
{
	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_pointer(skb) - skb->head < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	return true;
}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */