/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/cache.h>

#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE		0
#define CHECKSUM_UNNECESSARY	1
#define CHECKSUM_COMPLETE	2
#define CHECKSUM_PARTIAL	3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))
/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      This is a bad option, but, unfortunately, many vendors do it,
 *	      apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host. F.e. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if a device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below. This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host. The packet can
 *	    be treated in the same way as UNNECESSARY, except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum the packet as seen by
 *	hard_start_xmit from skb->csum_start to the end and to record
 *	the checksum at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_NO_CSUM	- loopback or reliable single hop media.
 *	NETIF_F_IP_CSUM	- device is dumb. It is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like it this
 *			  way for some unknown reason. Though, see the
 *			  comment above about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one, but does IPv6 instead.
 *
 * Any questions? No questions, good.		--ANK
 */
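
/* Example (editor's illustrative sketch, not part of the original header):
 * a hypothetical driver receive path might translate NIC descriptor flags
 * into the values above. "struct my_rx_desc", "l4_csum_ok", "has_csum" and
 * "csum" are made-up stand-ins for device-specific descriptor fields.
 *
 *	static void my_rx_set_csum(struct my_rx_desc *desc, struct sk_buff *skb)
 *	{
 *		if (desc->l4_csum_ok) {
 *			// device verified the L4 checksum itself
 *			skb->ip_summed = CHECKSUM_UNNECESSARY;
 *		} else if (desc->has_csum) {
 *			// device supplied a checksum of the whole packet
 *			skb->csum = desc->csum;
 *			skb->ip_summed = CHECKSUM_COMPLETE;
 *		} else {
 *			skb->ip_summed = CHECKSUM_NONE;
 *		}
 *	}
 */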

struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t use;
	struct net_device *physindev;
	struct net_device *physoutdev;
	unsigned int mask;
	unsigned long data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list. Since
 * GRO uses frags, we allocate at least 16 regardless of page size.
 */
#if (65536/PAGE_SIZE + 2) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* ensure the originating sk reference is available on driver level */
	SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
};

/* This data is invariant across clones and lives at
 * the end of the header data, i.e. at skb->end.
 */
struct skb_shared_info {
	unsigned short	nr_frags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short	gso_type;
	__be32		ip6_frag_id;
	__u8		tx_flags;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;

	/*
	 * Warning: all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until the skb destructor runs. */
	void		*destructor_arg;
	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
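
/* Example (editor's sketch): how the two halves of dataref can be decoded.
 * This mirrors the arithmetic used by skb_header_cloned() further below.
 *
 *	int dataref      = atomic_read(&skb_shinfo(skb)->dataref);
 *	int payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *	int total_refs   = dataref & SKB_DATAREF_MASK;
 *	// header-only references = total_refs - payload_refs
 */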


enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@sk: Socket we are owned by
 *	@tstamp: Time we arrived
 *	@dev: Device we arrived on/are leaving by
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@nohdr: Payload reference only, must not modify header
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ip_summed: Driver fed us an IP checksum
 *	@priority: Packet queueing priority
 *	@users: User count - see {datagram,tcp}.c
 *	@protocol: Packet protocol from driver
 *	@truesize: Buffer size
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@destructor: Destruct function
 *	@mark: Generic packet mark
 *	@nfct: Associated connection, if any
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@nfctinfo: Relationship of this skb to the connection
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@ndisc_nodetype: router type (from link layer)
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@vlan_tci: vlan tag control information
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct sec_path		*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;
#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u32			rxhash;

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			ooo_okay:1;
	kmemcheck_bitfield_end(flags2);

	/* 0/13 bit hole */

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
	};

	__u16			vlan_tci;

	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details. */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>

#include <asm/system.h>

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if the refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check that we are still inside
	 * an rcu_read_lock section.
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst);

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void __kfree_skb(struct sk_buff *skb);
extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int fclone, int node);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, 1, NUMA_NO_NODE);
}
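
/* Example (editor's sketch): typical allocation on a transmit-like path.
 * The sizes are arbitrary illustration values.
 *
 *	struct sk_buff *skb = alloc_skb(128 + NET_SKB_PAD, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_SKB_PAD);	// leave headroom for lower layers
 *	...
 *	kfree_skb(skb);			// drop our reference when done
 */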

extern bool skb_recycle_check(struct sk_buff *skb, int skb_size);

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *pskb_copy(struct sk_buff *skb,
				 gfp_t gfp_mask);
extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
				    int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

extern __u32 __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->rxhash)
		skb->rxhash = __skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}
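
/* Example (editor's sketch): walking a queue with the helpers above,
 * without removing anything. The caller must hold the queue lock.
 * "inspect" is a hypothetical per-packet hook.
 *
 *	struct sk_buff *skb = skb_peek(list);
 *
 *	while (skb) {
 *		inspect(skb);
 *		if (skb_queue_is_last(list, skb))
 *			break;
 *		skb = skb_queue_next(list, skb);
 *	}
 */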

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
					      gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);
		kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}
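
/* Example (editor's sketch): a protocol handler that may modify the skb
 * would typically start like this; on clone failure the original reference
 * has already been dropped by skb_share_check().
 *
 *	skb = skb_share_check(skb, GFP_ATOMIC);
 *	if (!skb)
 *		return NET_RX_DROP;
 */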

/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader as well as
 *	forwarding, and a couple of other messy ones. The normal one is
 *	tcpdumping a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->next;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
{
	struct sk_buff *list = ((struct sk_buff *)list_)->prev;
	if (list == (struct sk_buff *)list_)
		list = NULL;
	return list;
}

/**
 *	skb_queue_len - get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows the list aspects of an
 *	sk_buff_head to be initialized without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers will need annotation to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
					     struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}
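
/* Example (editor's sketch): a driver-private queue, initialised once and
 * then fed from interrupt context; skb_queue_tail()/skb_dequeue() take the
 * queue lock internally. "rxq" is an illustrative name.
 *
 *	struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *	skb_queue_tail(&rxq, skb);
 *	skb = skb_dequeue(&rxq);	// NULL when the queue is empty
 */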

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void	   skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given buffer
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * remove sk_buff from list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}


static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_shinfo(skb)->frags[i].size;
	return len + skb_headlen(skb);
}

static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	frag->page		  = page;
	frag->page_offset	  = off;
	frag->size		  = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size);

#define SKB_PAGE_ASSERT(skb)	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb)	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)	BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}
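
/* Example (editor's sketch): building a frame with the helpers above.
 * "PAYLOAD_LEN" and "fill_payload" are illustrative stand-ins.
 *
 *	skb_reserve(skb, sizeof(struct ethhdr));	// headroom for push
 *	fill_payload(skb_put(skb, PAYLOAD_LEN));	// append payload
 *	skb_push(skb, sizeof(struct ethhdr));		// prepend the header
 *	...
 *	skb_pull(skb, sizeof(struct ethhdr));		// consume it again
 */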

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
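
/* Example (editor's sketch): the canonical way to parse a header that may
 * not be in the linear area yet.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;		// packet too short or pull failed
 *	iph = ip_hdr(skb);		// now safe to dereference
 */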

/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb->transport_header = skb->data + offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb->network_header = skb->data + offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != NULL;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb->mac_header = skb->data + offset;
}
#endif /* NET_SKBUFF_DATA_USES_OFFSET */

static inline int skb_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}

static inline int skb_transport_offset(const struct sk_buff *skb)
{
	return skb_transport_header(skb) - skb->data;
}

static inline u32 skb_network_header_len(const struct sk_buff *skb)
{
	return skb->transport_header - skb->network_header;
}

static inline int skb_network_offset(const struct sk_buff *skb)
{
	return skb_network_header(skb) - skb->data;
}

static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
{
	return pskb_may_pull(skb, skb_network_offset(skb) + len);
}

/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus(), for example, only accesses one 64-byte aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to
 *	out-of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

/**
 *	__dev_alloc_skb - allocate an skbuff for receiving
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
	if (likely(skb))
		skb_reserve(skb, NET_SKB_PAD);
	return skb;
}

extern struct sk_buff *dev_alloc_skb(unsigned int length);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
		unsigned int length, gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
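
/* Example (editor's sketch): refilling an RX ring. The _ip_align variant
 * already accounts for NET_SKB_PAD and NET_IP_ALIGN; "rx_buf_len" is an
 * illustrative driver-specific size.
 *
 *	skb = netdev_alloc_skb_ip_align(dev, rx_buf_len);
 *	if (unlikely(!skb))
 *		break;			// retry on the next refill pass
 */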

/**
 *	__netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *	@gfp_mask: alloc_pages_node mask
 *
 *	Allocate a new page. dev currently unused.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
{
	return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0);
}

/**
 *	netdev_alloc_page - allocate a page for ps-rx on a specific device
 *	@dev: network device to receive on
 *
 *	Allocate a new page. dev currently unused.
 *
 *	%NULL is returned if there is no free memory.
 */
static inline struct page *netdev_alloc_page(struct net_device *dev)
{
	return __netdev_alloc_page(dev, GFP_ATOMIC);
}

static inline void netdev_free_page(struct net_device *dev, struct page *page)
{
	__free_page(page);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom < NET_SKB_PAD)
		headroom = NET_SKB_PAD;
	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}
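
/* Example (editor's sketch): make room for and prepend a VLAN tag; only
 * the header needs to be private, so skb_cow_head() suffices.
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	skb_push(skb, VLAN_HLEN);	// headroom is now guaranteed
 */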

/**
 *	skb_padto - pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
						      copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline int skb_can_coalesce(struct sk_buff *skb, int i,
				   struct page *page, int off)
{
	if (i) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == frag->page &&
		       off == frag->page_offset + frag->size;
	}
	return 0;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}
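
/* Example (editor's sketch): keep a CHECKSUM_COMPLETE value consistent when
 * stripping a header by hand (skb_pull_rcsum() below wraps this pattern).
 * "hdr_len" is an illustrative variable.
 *
 *	const void *start = skb->data;
 *
 *	skb_pull(skb, hdr_len);
 *	skb_postpull_rcsum(skb, start, hdr_len);
 */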

unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksums of received packets remain valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}
1787
1788#define skb_queue_walk(queue, skb) \
1789 for (skb = (queue)->next; \
1790 skb != (struct sk_buff *)(queue); \
1791 skb = skb->next)
1792
1793#define skb_queue_walk_safe(queue, skb, tmp) \
1794 for (skb = (queue)->next, tmp = skb->next; \
1795 skb != (struct sk_buff *)(queue); \
1796 skb = tmp, tmp = skb->next)
1797
1798#define skb_queue_walk_from(queue, skb) \
1799 for (; skb != (struct sk_buff *)(queue); \
1800 skb = skb->next)
1801
1802#define skb_queue_walk_from_safe(queue, skb, tmp) \
1803 for (tmp = skb->next; \
1804 skb != (struct sk_buff *)(queue); \
1805 skb = tmp, tmp = skb->next)
1806
1807#define skb_queue_reverse_walk(queue, skb) \
1808 for (skb = (queue)->prev; \
1809 skb != (struct sk_buff *)(queue); \
1810 skb = skb->prev)
1811
1812#define skb_queue_reverse_walk_safe(queue, skb, tmp) \
1813 for (skb = (queue)->prev, tmp = skb->prev; \
1814 skb != (struct sk_buff *)(queue); \
1815 skb = tmp, tmp = skb->prev)
1816
1817#define skb_queue_reverse_walk_from_safe(queue, skb, tmp) \
1818 for (tmp = skb->prev; \
1819 skb != (struct sk_buff *)(queue); \
1820 skb = tmp, tmp = skb->prev)

static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter) \
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
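
/*
 * Usage sketch: summing the lengths of all skbs hanging off the
 * fragment list (illustrative only).
 */
#if 0
	struct sk_buff *iter;
	unsigned int frag_len = 0;

	skb_walk_frags(skb, iter)
		frag_len += iter->len;
#endif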

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int datagram_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait);
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
				   int offset, struct iovec *to,
				   int size);
extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
					    int hlen,
					    struct iovec *iov);
extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
					int offset,
					const struct iovec *from,
					int from_offset,
					int len);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
					 int offset,
					 const struct iovec *to,
					 int to_offset,
					 int size);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void skb_free_datagram_locked(struct sock *sk,
				     struct sk_buff *skb);
extern int skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
			     unsigned int flags);
extern __wsum skb_checksum(const struct sk_buff *skb, int offset,
			   int len, __wsum csum);
extern int skb_copy_bits(const struct sk_buff *skb, int offset,
			 void *to, int len);
extern int skb_store_bits(struct sk_buff *skb, int offset,
			  const void *from, int len);
extern __wsum skb_copy_and_csum_bits(const struct sk_buff *skb,
				     int offset, u8 *to, int len,
				     __wsum csum);
extern int skb_splice_bits(struct sk_buff *skb,
			   unsigned int offset,
			   struct pipe_inode_info *pipe,
			   unsigned int len,
			   unsigned int flags);
extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void skb_split(struct sk_buff *skb,
		      struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
		     int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb, u32 features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}
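
/*
 * Usage sketch: reading a transport header that may straddle fragments.
 * If the requested bytes already sit in the linear area, a pointer into
 * skb->data is returned with no copy; otherwise they are copied into the
 * on-stack buffer.  (Assumes <linux/tcp.h> for struct tcphdr; the offset
 * and drop label are illustrative.)
 */
#if 0
	struct tcphdr _tcph, *th;

	th = skb_header_pointer(skb, skb_transport_offset(skb),
				sizeof(_tcph), &_tcph);
	if (!th)
		goto drop;	/* packet shorter than a TCP header */
#endif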

static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}
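
/*
 * Usage sketch: these helpers touch only the linear head, so the caller
 * must ensure the range lies within skb_headlen().  (Assumes ETH_HLEN
 * from <linux/if_ether.h>; purely illustrative.)
 */
#if 0
	u8 hdr[ETH_HLEN];

	if (skb_headlen(skb) >= ETH_HLEN)
		skb_copy_from_linear_data(skb, hdr, ETH_HLEN);
#endif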

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 * skb_get_timestamp - get timestamp from a skb
 * @skb: skb to get stamp from
 * @stamp: pointer to struct timeval to store stamp in
 *
 * Timestamps are stored in the skb as offsets to a base timestamp.
 * This function converts the offset back to a struct timeval and stores
 * it in @stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}
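
/*
 * Usage sketch: converting a receive timestamp for reporting to user
 * space (illustrative).
 */
#if 0
	struct timeval tv;

	skb_get_timestamp(skb, &tv);
	/* tv is 0.0 if the skb was never timestamped */
#endif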

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb: the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 * If the skb has an associated socket, this function clones the skb
 * (thus sharing the actual data and optional structures), stores the
 * hardware time stamping information if @hwtstamps is non-NULL, or
 * generates a software time stamp otherwise, then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			  struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC drivers should call this function in their hard_start_xmit()
 * function as soon as possible after giving the sk_buff to the MAC
 * hardware, but before freeing the sk_buff.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}
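
/*
 * Usage sketch: where the hook belongs in a driver transmit routine
 * (example_queue_to_hw() and the driver itself are hypothetical).
 */
#if 0
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	example_queue_to_hw(dev, skb);	/* hand the buffer to the MAC */
	skb_tx_timestamp(skb);		/* after queueing, before any free */
	return NETDEV_TX_OK;
}
#endif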

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 * skb_checksum_complete - Calculate checksum of an entire packet
 * @skb: packet to process
 *
 * This function calculates the checksum over the entire packet plus
 * the value of skb->csum. The latter can be used to supply the
 * checksum of a pseudo header as used by TCP/UDP. It returns the
 * checksum.
 *
 * For protocols that contain complete checksums such as ICMP/TCP/UDP,
 * this function can be used to verify the checksum on received
 * packets. In that case the function should return zero if the
 * checksum is correct. In particular, this function will return zero
 * if skb->ip_summed is CHECKSUM_UNNECESSARY, which indicates that the
 * hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}
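
/*
 * Usage sketch: a receive path verifying a complete checksum before
 * accepting the packet (the csum_error label is illustrative).
 */
#if 0
	if (skb_checksum_complete(skb))
		goto csum_error;	/* non-zero means the checksum failed */
#endif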

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

/* Note: this does not put (release) any conntrack or bridge info already
 * held by dst; callers that may hold such references must drop them first,
 * as nf_copy() does.
 */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}
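
/*
 * Usage sketch: queue_mapping stores the receive queue biased by one,
 * so zero means "not recorded" (queue index illustrative).
 */
#if 0
	u16 index;

	skb_record_rx_queue(skb, 3);
	if (skb_rx_queue_recorded(skb))
		index = skb_get_rx_queue(skb);	/* yields 3 again */
#endif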

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline int skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set.
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * Fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */