/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and would have verified the
 *		checksum. skb->csum is undefined.
 *	  It is a bad option, but, unfortunately, many vendors do this.
 *	  Apparently with the secret goal of selling you a new device, for when
 *	  you add a new protocol to your host, e.g. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if a device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY, except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol, or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *	hard_start_xmit from skb->csum_start to the end, and to record the
 *	checksum at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it's a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_IP_CSUM - device is dumb, it is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
 *
 *	UNNECESSARY: device will do a per-protocol-specific csum. Protocol
 *	drivers that do not want net to perform the checksum calculation
 *	should use this flag in their outgoing skbs.
 *	NETIF_F_FCOE_CRC - this indicates the device can do the FCoE FC CRC
 *			   offload. Correspondingly, the FCoE protocol driver
 *			   stack should use CHECKSUM_UNNECESSARY.
 *
 * Any questions? No questions, good.		--ANK
 */
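/*
 * Editorial sketch (not part of the original header): how a protocol
 * might request checksum offload on output, using the fields documented
 * above.  struct tcphdr is from <linux/tcp.h> and is used only for
 * illustration; error handling is elided.
 *
 *	skb->ip_summed   = CHECKSUM_PARTIAL;
 *	skb->csum_start  = skb_transport_header(skb) - skb->head;
 *	skb->csum_offset = offsetof(struct tcphdr, check);
 *
 * The driver (or hardware) is then expected to checksum from
 * skb->csum_start to the end of the packet and store the result at
 * skb->csum_start + skb->csum_offset.
 */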
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	unsigned int		mask;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow 64K frame to be packed as single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary.
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};
/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * lower device, the skb last reference should be 0 when calling this.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *);
	void *ctx;
	unsigned long desc;
};

/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32		ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void		*destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
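/*
 * Editorial sketch (not part of the original header): splitting the
 * combined dataref into its two halves, in the same way that
 * skb_header_cloned() below does.
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 *
 * The invariant documented above is total_refs >= payload_refs; the
 * number of header-only references is their difference.
 */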
enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 * struct sk_buff - socket buffer
 * @next: Next buffer in list
 * @prev: Previous buffer in list
 * @tstamp: Time we arrived
 * @sk: Socket we are owned by
 * @dev: Device we arrived on/are leaving by
 * @cb: Control buffer. Free for use by every layer. Put private vars here
 * @_skb_refdst: destination entry (with norefcount bit)
 * @sp: the security path, used for xfrm
 * @len: Length of actual data
 * @data_len: Data length
 * @mac_len: Length of link layer header
 * @hdr_len: writable header length of cloned skb
 * @csum: Checksum (must include start/offset pair)
 * @csum_start: Offset from skb->head where checksumming should start
 * @csum_offset: Offset from csum_start where checksum should be stored
 * @priority: Packet queueing priority
 * @local_df: allow local fragmentation
 * @cloned: Head may be cloned (check refcnt to be sure)
 * @ip_summed: Driver fed us an IP checksum
 * @nohdr: Payload reference only, must not modify header
 * @nfctinfo: Relationship of this skb to the connection
 * @pkt_type: Packet class
 * @fclone: skbuff clone status
 * @ipvs_property: skbuff is owned by ipvs
 * @peeked: this packet has been seen already, so stats have been
 *	done for it, don't do them again
 * @nf_trace: netfilter packet trace flag
 * @protocol: Packet protocol from driver
 * @destructor: Destruct function
 * @nfct: Associated connection, if any
 * @nfct_reasm: netfilter conntrack re-assembly pointer
 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 * @skb_iif: ifindex of device we arrived on
 * @tc_index: Traffic control index
 * @tc_verd: traffic control verdict
 * @rxhash: the packet hash computed on receive
 * @queue_mapping: Queue mapping for multiqueue devices
 * @ndisc_nodetype: router type (from link layer)
 * @ooo_okay: allow the mapping of a socket to a queue to be changed
 * @l4_rxhash: indicates rxhash is a canonical 4-tuple hash over transport
 *	ports.
 * @wifi_acked_valid: wifi_acked was set
 * @wifi_acked: whether frame was acked on wifi or not
 * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
 * @dma_cookie: a cookie to one of several possible DMA operations
 *	done by skb DMA functions
 * @secmark: security marking
 * @mark: Generic packet mark
 * @dropcount: total number of sk_receive_queue overflows
 * @vlan_tci: vlan tag control information
 * @transport_header: Transport layer header
 * @network_header: Network layer header
 * @mac_header: Link layer header
 * @tail: Tail pointer
 * @end: End pointer
 * @head: Head of buffer
 * @data: Data head pointer
 * @truesize: Buffer size
 * @users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;

	__u32			rxhash;

	__u16			vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	__u8			head_frag:1;
	/* 8/10 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
		__u32		avail_size;
	};

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of whether a reference was taken.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}
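/*
 * Editorial sketch (not part of the original header): reading a noref
 * dst safely.  A caller that did not take its own reference must stay
 * inside an RCU read-side section, which is what the WARN_ON above
 * checks for.
 *
 *	rcu_read_lock();
 *	dst = skb_dst(skb);
 *	if (dst)
 *		... use dst while the read-side section is held ...
 *	rcu_read_unlock();
 */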
/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
			     bool *fragstolen, int *delta_truesize);

extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}

extern void skb_recycle(struct sk_buff *skb);
extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);
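/*
 * Editorial sketch (not part of the original header): a typical
 * sequential-read loop over an skb's data, linear or paged.
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, avail;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		... process avail bytes at data ...
 *		consumed += avail;
 *	}
 *	skb_abort_seq_read(&st);
 *
 * skb_abort_seq_read() is only strictly needed when the walk stops
 * before skb_seq_read() has returned 0.
 */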
extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
				  unsigned int to, struct ts_config *config,
				  struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}
/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt status or with spinlocks held pri must
 *	be GFP_ATOMIC.
 *
 *	NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
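/*
 * Editorial sketch (not part of the original header): the usual
 * receive-path pattern before modifying an skb that may also be held
 * by a packet tap.  On clone failure the original has already been
 * freed, so only NULL needs handling.
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 *	... skb is now unshared and safe to modify ...
 */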
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader and forward
 *	and a couple of other messy ones. The normal one is tcpdumping
 *	a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
					    const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows initializing the list
 *	aspects of an sk_buff_head without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}
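/*
 * Editorial sketch (not part of the original header): collecting skbs
 * on an on-stack queue whose spinlock is never taken, so only the
 * lockless initializer above and the __skb_*() helpers below are used.
 *
 *	struct sk_buff_head tmp;
 *
 *	__skb_queue_head_init(&tmp);
 *	__skb_queue_tail(&tmp, skb);
 *	while ((skb = __skb_dequeue(&tmp)) != NULL)
 *		... process skb ...
 */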
/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers will need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}
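/*
 * Editorial sketch (not part of the original header): draining a
 * shared queue in one lock round-trip by splicing it onto a private
 * on-stack list, a common pattern with the splice helpers above.
 *
 *	struct sk_buff_head tmp;
 *
 *	__skb_queue_head_init(&tmp);
 *	spin_lock_irq(&queue->lock);
 *	skb_queue_splice_init(queue, &tmp);
 *	spin_unlock_irq(&queue->lock);
 *	while ((skb = __skb_dequeue(&tmp)) != NULL)
 *		... process skb without holding queue->lock ...
 */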
/**
 *	__skb_queue_after - queue a buffer after a given buffer on the list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}
/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page.p	  = page;
	frag->page_offset = off;
	skb_frag_size_set(frag, size);
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition it updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}
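/*
 * Editorial sketch (not part of the original header): attaching one
 * page of data to an otherwise empty skb.  The frag helpers take no
 * page reference, so the caller supplies one, and the fragment's
 * bytes count towards skb->len and skb->data_len but not towards
 * skb_headlen().
 *
 *	get_page(page);
 *	skb_fill_page_desc(skb, 0, page, 0, len);
 *	skb->len      += len;
 *	skb->data_len += len;
 *	skb->truesize += PAGE_SIZE;
 */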
extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size, unsigned int truesize);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
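/*
 * Editorial sketch (not part of the original header): the canonical
 * header-parsing pattern.  pskb_may_pull() guarantees that the first
 * @len bytes are linear before they are dereferenced; struct iphdr is
 * from <linux/ip.h> and used only for illustration.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = (struct iphdr *)skb->data;
 *	... fields of *iph may now be read safely ...
 */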
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc()
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}
static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline void skb_mac_header_rebuild(struct sk_buff *skb)
{
	if (skb_mac_header_was_set(skb)) {
		const unsigned char *old_mac = skb_mac_header(skb);

		skb_set_mac_header(skb, -skb->mac_len);
		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
	}
}

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus() for example only accesses one 64-byte-aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif
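/*
 * Editorial sketch (not part of the original header): how the header
 * offsets above are typically recorded while a frame is being built on
 * output.  ETH_HLEN is from <linux/if_ether.h> and is used only for
 * illustration.
 *
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_reset_network_header(skb);
 *	... fill in the network header via skb_put() ...
 *	skb_push(skb, ETH_HLEN);
 *	skb_reset_mac_header(skb);
 *	skb_reset_mac_len(skb);
 */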
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

extern void *netdev_alloc_frag(unsigned int fragsz);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
					  unsigned int length,
					  gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
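/*
 * Editorial sketch (not part of the original header): a driver's
 * receive-allocation pattern using the helper above and the
 * netdev_alloc_skb_ip_align() variant defined below.  eth_type_trans()
 * and netif_rx() live in other headers and are shown only for context.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, pkt_len);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *	skb->protocol = eth_type_trans(skb, dev);
 *	netif_rx(skb);
 */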
/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}
/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (%PCI_DMA_*)
 *
 * Maps the page associated with @frag to @device.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and the original skb is not changed.
 *
 *	The result is skb with a writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
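/*
 * Editorial sketch (not part of the original header): making room for
 * and pushing an extra header on a possibly-cloned skb.  VLAN_HLEN is
 * from <linux/if_vlan.h> and is used only for illustration.
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	skb_push(skb, VLAN_HLEN);
 *	... write the new header at skb->data ...
 */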
/**
 * skb_cow_head - skb_cow but only making the head writable
 * @skb: buffer to cow
 * @headroom: needed headroom
 *
 * This function is identical to skb_cow except that we replace the
 * skb_cloned check by skb_header_cloned.  It should be used when
 * you only need to push on some header and do not need to modify
 * the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

/**
 * skb_padto - pad an skbuff up to a minimal size
 * @skb: buffer to pad
 * @len: minimal length
 *
 * Pads up a buffer to ensure the trailing bytes exist and are
 * blanked. If the buffer already contains sufficient data it
 * is untouched. Otherwise it is extended. Returns zero on
 * success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 * skb_linearize - convert paged skb to linear one
 * @skb: buffer to linearize
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 * skb_linearize_cow - make sure skb is linear and writable
 * @skb: buffer to process
 *
 * If there is no free memory -ENOMEM is returned, otherwise zero
 * is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}
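/*
 * Example: the usual pattern for skb_cow_head() above when a driver or
 * tunnel has to push an extra header on transmit.  Sketch only; the
 * VLAN header size is simply a convenient illustration of @headroom:
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	skb_push(skb, VLAN_HLEN);
 *
 * After a successful skb_cow_head() the header area is private and
 * writable, so the new header can be filled in at skb->data.
 */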
/**
 * skb_postpull_rcsum - update checksum for received skb after pull
 * @skb: buffer to update
 * @start: start of data before pull
 * @len: length of data pulled
 *
 * After doing a pull on a received packet, you need to call this to
 * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 * CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 * pskb_trim_rcsum - trim received skb and update checksum
 * @skb: buffer to trim
 * @len: new length
 *
 * This is exactly the same as pskb_trim except that it ensures the
 * checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->next, tmp = skb->next;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)					\
		for (; skb != (struct sk_buff *)(queue);		\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->next;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;				\
		     skb != (struct sk_buff *)(queue);			\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)			\
		for (skb = (queue)->prev, tmp = skb->prev;		\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)		\
		for (tmp = skb->prev;					\
		     skb != (struct sk_buff *)(queue);			\
		     skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
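/*
 * Example: scanning a socket buffer queue with skb_queue_walk() above
 * while holding the queue lock.  Sketch only; matching on skb->mark is
 * invented for the example.  The _safe variants must be used instead
 * whenever the loop body may unlink the current skb from the queue:
 *
 *	struct sk_buff *skb;
 *
 *	spin_lock_bh(&list->lock);
 *	skb_queue_walk(list, skb) {
 *		if (skb->mark == mark)
 *			break;
 *	}
 *	spin_unlock_bh(&list->lock);
 */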
extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *off, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb,
				   netdev_features_t features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}
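/*
 * Example: the canonical skb_header_pointer() pattern, as used by
 * netfilter matches, for reading a header that may not be contiguous
 * in the linear area.  Sketch only; "thoff" stands for the transport
 * header offset:
 *
 *	struct udphdr _buf;
 *	const struct udphdr *uh;
 *
 *	uh = skb_header_pointer(skb, thoff, sizeof(_buf), &_buf);
 *	if (uh == NULL)
 *		return false;
 *
 * On success, uh points either directly into skb->data or at the
 * on-stack copy in _buf; either way it is safe to read sizeof(_buf)
 * bytes through it.
 */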
/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb: the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket. Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}
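/*
 * Example: how a driver's TX-completion path might hand a hardware
 * transmit timestamp back to the stack with skb_tstamp_tx() above.
 * Sketch only; obtaining the nanosecond value "ns" from the NIC's
 * timestamp register is entirely device specific:
 *
 *	struct skb_shared_hwtstamps hwts;
 *
 *	memset(&hwts, 0, sizeof(hwts));
 *	hwts.hwtstamp = ns_to_ktime(ns);
 *	skb_tstamp_tx(skb, &hwts);
 */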
/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}

/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum.  The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP.  It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets.  In that case the function should return zero if the
 * checksum is correct.  In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}
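/*
 * Example: receive-side validation with skb_checksum_complete() above,
 * in the style of a protocol such as ICMP.  Sketch only; for TCP/UDP
 * the pseudo-header sum would be folded into skb->csum first:
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 *
 * A zero return means either the checksum verified correctly or the
 * hardware already vouched for it (CHECKSUM_UNNECESSARY).
 */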
/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}
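/*
 * Example: a multiqueue driver tagging each received skb with the ring
 * it arrived on, so that skb_rx_queue_recorded()/skb_get_rx_queue()
 * above can steer related transmits later.  Sketch only; "ring->index"
 * and the napi context are assumptions made for the example:
 *
 *	skb_record_rx_queue(skb, ring->index);
 *	napi_gro_receive(&ring->napi, skb);
 */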
static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
{
	if (irqs_disabled())
		return false;

	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
		return false;

	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
		return false;

	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
	if (skb_end_offset(skb) < skb_size)
		return false;

	if (skb_shared(skb) || skb_cloned(skb))
		return false;

	return true;
}

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */