// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>
#include <linux/kcov.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>
#include <net/mctp.h>
#include <net/page_pool.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "dev.h"
#include "sock_destructor.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

#undef FN
#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
const char * const drop_reasons[] = {
	DEFINE_DROP_REASON(FN, FN)
};
EXPORT_SYMBOL(drop_reasons);

/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

#define NAPI_SKB_CACHE_SIZE	64
#define NAPI_SKB_CACHE_BULK	16
#define NAPI_SKB_CACHE_HALF	(NAPI_SKB_CACHE_SIZE / 2)

#if PAGE_SIZE == SZ_4K

#define NAPI_HAS_SMALL_PAGE_FRAG	1
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	((nc).pfmemalloc)

/* specialized page frag allocator using a single order 0 page
 * and slicing it into 1K sized fragments. Constrained to systems
 * with a very limited amount of 1K fragments fitting a single
 * page - to avoid excessive truesize underestimation
 */

struct page_frag_1k {
	void *va;
	u16 offset;
	bool pfmemalloc;
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
{
	struct page *page;
	int offset;

	offset = nc->offset - SZ_1K;
	if (likely(offset >= 0))
		goto use_frag;

	page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
	if (!page)
		return NULL;

	nc->va = page_address(page);
	nc->pfmemalloc = page_is_pfmemalloc(page);
	offset = PAGE_SIZE - SZ_1K;
	page_ref_add(page, offset / SZ_1K);

use_frag:
	nc->offset = offset;
	return nc->va + offset;
}
#else

/* the small page is actually unused in this build; add dummy helpers
 * to please the compiler and avoid later preprocessor's conditionals
 */
#define NAPI_HAS_SMALL_PAGE_FRAG	0
#define NAPI_SMALL_PAGE_PFMEMALLOC(nc)	false

struct page_frag_1k {
};

static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
{
	return NULL;
}

#endif

struct napi_alloc_cache {
	struct page_frag_cache page;
	struct page_frag_1k page_small;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

/* Double check that napi_get_frags() allocates skbs with
 * skb->head being backed by slab, not a page fragment.
 * This is to make sure bug fixed in 3226b158e67c
 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
 * does not accidentally come back.
 */
void napi_get_frags_check(struct napi_struct *napi)
{
	struct sk_buff *skb;

	local_bh_disable();
	skb = napi_get_frags(napi);
	WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
	napi_free_frags(napi);
	local_bh_enable();
}

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	fragsz = SKB_DATA_ALIGN(fragsz);

	return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
}
EXPORT_SYMBOL(__napi_alloc_frag_align);

void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_hardirq() || irqs_disabled()) {
		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);

		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
	} else {
		struct napi_alloc_cache *nc;

		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache);
		data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(__netdev_alloc_frag_align);
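
/* Usage sketch (illustrative only, not part of this file): a driver that
 * needs a small, cache-aligned receive buffer outside of NAPI context can
 * use the netdev_alloc_frag() wrapper from skbuff.h and release the
 * fragment with skb_free_frag(). The 2048-byte payload size below is an
 * assumption for the example:
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(2048) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(truesize);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... DMA-map buf and post it to the RX ring ...
 *	skb_free_frag(buf);	// on teardown, if never attached to an skb
 */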

static struct sk_buff *napi_skb_cache_get(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;

	if (unlikely(!nc->skb_count)) {
		nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
						      GFP_ATOMIC,
						      NAPI_SKB_CACHE_BULK,
						      nc->skb_cache);
		if (unlikely(!nc->skb_count))
			return NULL;
	}

	skb = nc->skb_cache[--nc->skb_count];
	kasan_unpoison_object_data(skbuff_head_cache, skb);

	return skb;
}

/* Caller must provide SKB that is memset cleared */
static void __build_skb_around(struct sk_buff *skb, void *data,
			       unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb_set_end_offset(skb, size);
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;
	skb->alloc_cpu = raw_smp_processor_id();
	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * Returns the new skb buffer; on failure, returns %NULL and @data is
 * not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
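
/* Usage sketch (illustrative only, not part of this file): a typical RX
 * path builds an skb around a page fragment the NIC has already filled.
 * The fragment must include tailroom for struct skb_shared_info; the
 * names buf, truesize and pkt_len are assumptions for the example:
 *
 *	struct sk_buff *skb = build_skb(buf, truesize);
 *
 *	if (unlikely(!skb)) {
 *		skb_free_frag(buf);
 *		return NULL;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD);	// headroom the driver left in buf
 *	skb_put(skb, pkt_len);		// bytes written by the NIC
 */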

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	__build_skb_around(skb, data, frag_size);

	if (frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

/**
 * __napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __build_skb() that uses NAPI percpu caches to obtain
 * skbuff_head instead of inplace allocation.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = napi_skb_cache_get();
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, frag_size);

	return skb;
}

/**
 * napi_build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Version of __napi_build_skb() that takes care of skb->head_frag
 * and skb->pfmemalloc when the data is a page or page fragment.
 *
 * Returns a new &sk_buff on success, %NULL on allocation failure.
 */
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __napi_build_skb(data, frag_size);

	if (likely(skb) && frag_size) {
		skb->head_frag = 1;
		skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
	}

	return skb;
}
EXPORT_SYMBOL(napi_build_skb);
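
/* Usage sketch (illustrative only, not part of this file): inside a NAPI
 * poll callback, napi_build_skb() is the cheaper variant because the skb
 * head comes from the percpu NAPI cache. buf, truesize and pkt_len are
 * assumptions for the example:
 *
 *	skb = napi_build_skb(buf, truesize);
 *	if (unlikely(!skb)) {
 *		skb_free_frag(buf);
 *		break;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD);
 *	skb_put(skb, pkt_len);
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 */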

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
			     bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 */

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *	instead of head cache and allocate a cloned (child) skb.
 *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *	allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct sk_buff *skb;
	unsigned int osize;
	bool pfmemalloc;
	u8 *data;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
	    likely(node == NUMA_NO_NODE || node == numa_mem_id()))
		skb = napi_skb_cache_get();
	else
		skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
	if (unlikely(!skb))
		return NULL;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (unlikely(!data))
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	osize = ksize(data);
	size = SKB_WITH_OVERHEAD(osize);
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	__build_skb_around(skb, data, osize);
	skb->pfmemalloc = pfmemalloc;

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);
	}

	return skb;

nodata:
	kmem_cache_free(cache, skb);
	return NULL;
}
EXPORT_SYMBOL(__alloc_skb);
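
/* Usage sketch (illustrative only, not part of this file): most callers go
 * through the alloc_skb() wrapper from skbuff.h rather than __alloc_skb()
 * directly. A protocol building an outgoing packet might do:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);			// headroom for headers
 *	skb_put_data(skb, payload, dlen);	// copy in the payload
 *	skb_push(skb, hlen);			// then prepend headers
 *
 * hlen, dlen and payload are assumptions for the example.
 */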

/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_hardirq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
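
/* Usage sketch (illustrative only, not part of this file): drivers usually
 * call the netdev_alloc_skb() wrapper, which passes GFP_ATOMIC. The
 * NET_SKB_PAD headroom is already reserved on return:
 *
 *	struct sk_buff *skb = netdev_alloc_skb(netdev, buf_len);
 *
 *	if (unlikely(!skb))
 *		return -ENOMEM;
 *	skb_put(skb, pkt_len);	// after the NIC wrote pkt_len bytes
 *
 * netdev, buf_len and pkt_len are assumptions for the example.
 */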

/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());
	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 * When the small frag allocator is available, prefer it over kmalloc
	 * for small fragments
	 */
	if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
				  NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
		/* we are artificially inflating the allocation size, but
		 * that is not as bad as it may look, as:
		 * - 'len' less than GRO_MAX_HEAD makes little sense
		 * - On most systems, larger 'len' values lead to fragment
		 *   size above 512 bytes
		 * - kmalloc would use the kmalloc-1k slab for such values
		 * - Builds with smaller GRO_MAX_HEAD will very likely do
		 *   little networking, as that implies no WiFi and no
		 *   tunnels support, and 32 bits arches.
		 */
		len = SZ_1K;

		data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
		pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
	} else {
		len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
		len = SKB_DATA_ALIGN(len);

		data = page_frag_alloc(&nc->page, len, gfp_mask);
		pfmemalloc = nc->page.pfmemalloc;
	}

	if (unlikely(!data))
		return NULL;

	skb = __napi_build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
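
/* Usage sketch (illustrative only, not part of this file): from a NAPI
 * poll loop, napi_alloc_skb() (the GFP_ATOMIC wrapper) hands back an skb
 * with NET_SKB_PAD + NET_IP_ALIGN headroom already reserved:
 *
 *	struct sk_buff *skb = napi_alloc_skb(napi, RX_COPYBREAK);
 *
 *	if (unlikely(!skb))
 *		break;
 *	skb_put_data(skb, rx_buf, pkt_len);	// copybreak-style receive
 *	skb->protocol = eth_type_trans(skb, napi->dev);
 *	napi_gro_receive(napi, skb);
 *
 * RX_COPYBREAK, rx_buf and pkt_len are assumptions for the example.
 */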

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
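
/* Usage sketch (illustrative only, not part of this file): a driver doing
 * header-split RX can attach the payload page to the skb as a fragment
 * instead of copying it:
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			offset, frag_len, PAGE_SIZE);
 *
 * page, offset and frag_len are assumptions; PAGE_SIZE is a reasonable
 * truesize when the fragment owns the whole page.
 */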

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag) {
		if (skb_pp_recycle(skb, head))
			return;
		skb_free_frag(head);
	} else {
		kfree(head);
	}
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		goto exit;

	if (skb_zcopy(skb)) {
		bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;

		skb_zcopy_clear(skb, true);
		if (skip_unref)
			goto free_head;
	}

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);

free_head:
	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
exit:
	/* When we clone an SKB we copy the recycling bit. The pp_recycle
	 * bit is only set on the head though, so in order to avoid races
	 * while trying to recycle fragments on __skb_frag_unref() we need
	 * to make one SKB responsible for triggering the recycle path.
	 * So disable the recycling bit if an SKB is cloned and we have
	 * additional references to the fragmented part of the SKB.
	 * Eventually the last SKB will have the recycling bit set and its
	 * dataref set to 0, which will trigger the recycling
	 */
	skb->pp_recycle = 0;
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		DEBUG_NET_WARN_ON_ONCE(in_hardirq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 * kfree_skb_reason - free an sk_buff with special reason
 * @skb: buffer to free
 * @reason: reason why this skb is dropped
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero. Meanwhile, pass the drop reason to 'kfree_skb'
 * tracepoint.
 */
void __fix_address
kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
{
	if (unlikely(!skb_unref(skb)))
		return;

	DEBUG_NET_WARN_ON_ONCE(reason <= 0 || reason >= SKB_DROP_REASON_MAX);

	trace_kfree_skb(skb, __builtin_return_address(0), reason);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb_reason);
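
/* Usage sketch (illustrative only, not part of this file): protocol code
 * that discards a packet should pass a specific drop reason so that the
 * kfree_skb tracepoint (and tools such as dropwatch or perf) can
 * attribute the drop:
 *
 *	if (!sk) {
 *		kfree_skb_reason(skb, SKB_DROP_REASON_NO_SOCKET);
 *		return;
 *	}
 */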

void kfree_skb_list_reason(struct sk_buff *segs,
			   enum skb_drop_reason reason)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb_reason(segs, reason);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list_reason);

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag: ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);

/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb) {
		skb_zcopy_downgrade_managed(skb);
		skb_zcopy_clear(skb, true);
	}
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif
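
/* Usage sketch (illustrative only, not part of this file): the choice
 * between consume_skb() and kfree_skb() is about tracing semantics, not
 * memory management. On a successful TX completion:
 *
 *	consume_skb(skb);	// normal end of life, not a drop
 *
 * whereas an error path should call kfree_skb()/kfree_skb_reason() so
 * the packet shows up as dropped in tracing tools.
 */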

/**
 * __consume_stateless_skb - free an skbuff, assuming it is stateless
 * @skb: buffer to free
 *
 * Like consume_skb(), but this variant assumes that this is the last
 * skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

static void napi_skb_cache_put(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	u32 i;

	kasan_poison_object_data(skbuff_head_cache, skb);
	nc->skb_cache[nc->skb_count++] = skb;

	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
			kasan_unpoison_object_data(skbuff_head_cache,
						   nc->skb_cache[i]);

		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
				     nc->skb_cache + NAPI_SKB_CACHE_HALF);
		nc->skb_count = NAPI_SKB_CACHE_HALF;
	}
}

void __kfree_skb_defer(struct sk_buff *skb)
{
	skb_release_all(skb);
	napi_skb_cache_put(skb);
}

void napi_skb_free_stolen_head(struct sk_buff *skb)
{
	if (unlikely(skb->slow_gro)) {
		nf_reset_ct(skb);
		skb_dst_drop(skb);
		skb_ext_put(skb);
		skb_orphan(skb);
		skb->slow_gro = 0;
	}
	napi_skb_cache_put(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	DEBUG_NET_WARN_ON_ONCE(!in_softirq());

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	skb_release_all(skb);
	napi_skb_cache_put(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is contained by headers group */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) != \
		     offsetof(struct sk_buff, headers.field));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in the headers group.
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers, &old->headers, sizeof(new->headers));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
	CHECK_SKB_FIELD(alloc_cpu);
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	C(pp_recycle);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 * skb_morph - morph one skb into another
 * @dst: the skb to receive the contents
 * @src: the skb to supply the contents
 *
 * This is identical to skb_clone except that the target skb is
 * supplied by the user.
 *
 * The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	do {
		old_pg = atomic_long_read(&user->locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
		 old_pg);

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info_msgzc *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->ubuf.callback = msg_zerocopy_callback;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
	refcount_set(&uarg->ubuf.refcnt, 1);
	sock_hold(sk);

	return &uarg->ubuf;
}

static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
				       struct ubuf_info *uarg)
{
	if (uarg) {
		struct ubuf_info_msgzc *uarg_zc;
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* there might be non MSG_ZEROCOPY users */
		if (uarg->callback != msg_zerocopy_callback)
			return NULL;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		uarg_zc = uarg_to_msgzc(uarg);
		bytelen = uarg_zc->bytelen + size;
		if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg_zc->id + uarg_zc->len) == next) {
			if (mm_account_pinned_pages(&uarg_zc->mmp, size))
				return NULL;
			uarg_zc->len++;
			uarg_zc->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				net_zcopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return msg_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}

static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	bool is_zerocopy;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;
	is_zerocopy = uarg->zerocopy;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!is_zerocopy)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}

void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
			   bool success)
{
	struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);

	uarg_zc->zerocopy = uarg_zc->zerocopy & success;

	if (refcount_dec_and_test(&uarg->refcnt))
		__msg_zerocopy_callback(uarg_zc);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_callback);

void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk;

	atomic_dec(&sk->sk_zckey);
	uarg_to_msgzc(uarg)->len--;

	if (have_uref)
		msg_zerocopy_callback(NULL, uarg, true);
}
EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	int err, orig_len = skb->len;

	/* An skb can only point to one uarg. This edge case happens when
	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
	 */
	if (orig_uarg && uarg != orig_uarg)
		return -EEXIST;

	err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);

void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
{
	int i;

	skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_frag_ref(skb, i);
}
EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed);

static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
	}
	return 0;
}

/**
 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
 * @skb: the skb to modify
 * @gfp_mask: allocation priority
 *
 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
 * It will copy all frags into kernel and drop the reference
 * to userspace pages.
 *
 * If this function is called from an interrupt gfp_mask() must be
 * %GFP_ATOMIC.
 *
 * Returns 0 on success or a negative error code on failure
 * to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!num_frags)
		goto release;

	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < new_frags; i++) {
		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	page = head;
	d_off = 0;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
				      p, p_off, p_len, copied) {
			u32 copy, done = 0;
			vaddr = kmap_atomic(p);

			while (done < p_len) {
				if (d_off == PAGE_SIZE) {
					d_off = 0;
					page = (struct page *)page_private(page);
				}
				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
				memcpy(page_address(page) + d_off,
				       vaddr + p_off + done, copy);
				done += copy;
				d_off += copy;
			}
			kunmap_atomic(vaddr);
		}
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	/* skb frags point to kernel buffers */
	for (i = 0; i < new_frags - 1; i++) {
		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
		head = (struct page *)page_private(head);
	}
	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
	skb_shinfo(skb)->nr_frags = new_frags;

release:
	skb_zcopy_clear(skb, false);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 * skb_clone - duplicate an sk_buff
 * @skb: buffer to clone
 * @gfp_mask: allocation priority
 *
 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
 * copies share the same packet data but not structure. The new
 * buffer has a reference count of 1. If the allocation fails the
 * function returns %NULL otherwise the new buffer is returned.
 *
 * If this function is called from an interrupt gfp_mask() must be
 * %GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
		n->fclone = SKB_FCLONE_CLONE;
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
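
/* Usage sketch (illustrative only, not part of this file): cloning shares
 * the payload between two skb heads, so it is the cheap way to hand the
 * same packet to two consumers. Neither clone may modify the shared data
 * without first unsharing it (e.g. via pskb_expand_head()):
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		dev_queue_xmit(nskb);	// e.g. mirror the frame
 *	netif_rx(skb);			// original continues up the stack
 */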

void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}
EXPORT_SYMBOL(skb_headers_offset_update);

void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
EXPORT_SYMBOL(skb_copy_header);

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 * skb_copy - create private copy of an sk_buff
 * @skb: buffer to copy
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data. This is used when the
 * caller wishes to modify the data and needs a private copy of the
 * data to alter. Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * As a by-product this function converts a non-linear &sk_buff to a
 * linear one, so that the &sk_buff becomes completely private and the
 * caller is allowed to modify all the data of the returned buffer. This
 * means that this function is not recommended for use in circumstances
 * when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
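
/* Usage sketch (illustrative only, not part of this file): skb_copy()
 * linearizes and copies every byte, so reserve it for cases where the
 * payload itself must be rewritten. For header-only modifications the
 * cheaper variant keeps fragments shared:
 *
 *	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);
 *
 *	if (!n)
 *		goto drop;
 *	// n has a private, writable linear header; frags remain shared
 */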

/**
 * __pskb_copy_fclone - create copy of an sk_buff with private head.
 * @skb: buffer to copy
 * @headroom: headroom of new skb
 * @gfp_mask: allocation priority
 * @fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 * Make a copy of both an &sk_buff and part of its data, located
 * in header. Fragmented data remain shared. This is used when
 * the caller wishes to modify only header of &sk_buff and needs
 * private copy of the header to alter. Returns %NULL on failure
 * or the pointer to the buffer on success.
 * The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask) ||
		    skb_zerocopy_clone(n, skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	skb_copy_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

/**
 * pskb_expand_head - reallocate header of &sk_buff
 * @skb: buffer to reallocate
 * @nhead: room to add at head
 * @ntail: room to add at tail
 * @gfp_mask: allocation priority
 *
 * Expands (or creates identical copy, if @nhead and @ntail are zero)
 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 * reference count of 1. Returns zero on success or a negative error
 * code if expansion failed; in the latter case the &sk_buff is not
 * changed.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	BUG_ON(skb_shared(skb));

	skb_zcopy_downgrade_managed(skb);

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		if (skb_zcopy(skb))
			refcount_inc(&skb_uarg(skb)->refcnt);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;

	skb_set_end_offset(skb, size);
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	off = nhead;
#endif
	skb->tail += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	skb_metadata_clear(skb);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
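
/* Usage sketch (illustrative only, not part of this file): a tunnel or
 * encapsulation path that needs more headroom before pushing an outer
 * header can grow the head in place (the skb must be unshared):
 *
 *	if (skb_headroom(skb) < needed_headroom &&
 *	    pskb_expand_head(skb, needed_headroom - skb_headroom(skb),
 *			     0, GFP_ATOMIC))
 *		goto drop;
 *	__skb_push(skb, hdr_len);	// now guaranteed to fit
 *
 * needed_headroom and hdr_len are assumptions for the example; note that
 * skb_cow_head() wraps this same pattern.
 */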

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
{
	unsigned int saved_end_offset, saved_truesize;
	struct skb_shared_info *shinfo;
	int res;

	saved_end_offset = skb_end_offset(skb);
	saved_truesize = skb->truesize;

	res = pskb_expand_head(skb, 0, 0, pri);
	if (res)
		return res;

	skb->truesize = saved_truesize;

	if (likely(skb_end_offset(skb) == saved_end_offset))
		return 0;

	shinfo = skb_shinfo(skb);

	/* We are about to change back skb->end,
	 * we need to move skb_shinfo() to its new location.
	 */
	memmove(skb->head + saved_end_offset,
		shinfo,
		offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));

	skb_set_end_offset(skb, saved_end_offset);

	return 0;
}
1954
1955/**
1956 * skb_expand_head - reallocate header of &sk_buff
1957 * @skb: buffer to reallocate
1958 * @headroom: needed headroom
1959 *
1960 * Unlike skb_realloc_headroom, this one does not allocate a new skb
1961 * if possible; it copies skb->sk to the new skb when needed
1962 * and frees the original skb in case of failure.
1963 *
1964 * It expects an increase in headroom and generates a warning otherwise.
1965 */
1966
1967struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
1968{
1969 int delta = headroom - skb_headroom(skb);
1970 int osize = skb_end_offset(skb);
1971 struct sock *sk = skb->sk;
1972
1973 if (WARN_ONCE(delta <= 0,
1974 "%s is expecting an increase in the headroom", __func__))
1975 return skb;
1976
1977 delta = SKB_DATA_ALIGN(delta);
1978	/* pskb_expand_head() might crash if skb is shared. */
1979 if (skb_shared(skb) || !is_skb_wmem(skb)) {
1980 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
1981
1982 if (unlikely(!nskb))
1983 goto fail;
1984
1985 if (sk)
1986 skb_set_owner_w(nskb, sk);
1987 consume_skb(skb);
1988 skb = nskb;
1989 }
1990 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
1991 goto fail;
1992
1993 if (sk && is_skb_wmem(skb)) {
1994 delta = skb_end_offset(skb) - osize;
1995 refcount_add(delta, &sk->sk_wmem_alloc);
1996 skb->truesize += delta;
1997 }
1998 return skb;
1999
2000fail:
2001 kfree_skb(skb);
2002 return NULL;
2003}
2004EXPORT_SYMBOL(skb_expand_head);
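
/* Example (illustrative sketch): a tunnel transmit path that must prepend
 * an outer header; "head_room" is an assumed, precomputed header size.
 * Note the consume-on-failure contract: the original skb is gone if NULL
 * is returned.
 *
 *	if (skb_headroom(skb) < head_room) {
 *		skb = skb_expand_head(skb, head_room);
 *		if (!skb)
 *			return -ENOMEM;	// original skb already freed
 *	}
 */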
2005
2006/**
2007 * skb_copy_expand - copy and expand sk_buff
2008 * @skb: buffer to copy
2009 * @newheadroom: new free bytes at head
2010 * @newtailroom: new free bytes at tail
2011 * @gfp_mask: allocation priority
2012 *
2013 * Make a copy of both an &sk_buff and its data and, while doing so,
2014 * allocate additional space.
2015 *
2016 * This is used when the caller wishes to modify the data and needs a
2017 * private copy of the data to alter as well as more space for new fields.
2018 * Returns %NULL on failure or the pointer to the buffer
2019 * on success. The returned buffer has a reference count of 1.
2020 *
2021 * You must pass %GFP_ATOMIC as the allocation priority if this function
2022 * is called from an interrupt.
2023 */
2024struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
2025 int newheadroom, int newtailroom,
2026 gfp_t gfp_mask)
2027{
2028 /*
2029 * Allocate the copy buffer
2030 */
2031 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
2032 gfp_mask, skb_alloc_rx_flag(skb),
2033 NUMA_NO_NODE);
2034 int oldheadroom = skb_headroom(skb);
2035 int head_copy_len, head_copy_off;
2036
2037 if (!n)
2038 return NULL;
2039
2040 skb_reserve(n, newheadroom);
2041
2042 /* Set the tail pointer and length */
2043 skb_put(n, skb->len);
2044
2045 head_copy_len = oldheadroom;
2046 head_copy_off = 0;
2047 if (newheadroom <= head_copy_len)
2048 head_copy_len = newheadroom;
2049 else
2050 head_copy_off = newheadroom - head_copy_len;
2051
2052 /* Copy the linear header and data. */
2053 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
2054 skb->len + head_copy_len));
2055
2056 skb_copy_header(n, skb);
2057
2058 skb_headers_offset_update(n, newheadroom - oldheadroom);
2059
2060 return n;
2061}
2062EXPORT_SYMBOL(skb_copy_expand);
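
/* Example (illustrative sketch): taking a private, writable copy with room
 * for an extra trailer; "TRAILER_LEN" is an assumed constant.
 *
 *	struct sk_buff *n = skb_copy_expand(skb, skb_headroom(skb),
 *					    TRAILER_LEN, GFP_ATOMIC);
 *
 *	if (!n)
 *		return -ENOMEM;
 *	memset(skb_put(n, TRAILER_LEN), 0, TRAILER_LEN);
 */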
2063
2064/**
2065 * __skb_pad - zero pad the tail of an skb
2066 * @skb: buffer to pad
2067 * @pad: space to pad
2068 * @free_on_error: free buffer on error
2069 *
2070 * Ensure that a buffer is followed by a padding area that is zero
2071 * filled. Used by network drivers which may DMA or transfer data
2072 * beyond the buffer end onto the wire.
2073 *
2074 * May return error in out of memory cases. The skb is freed on error
2075 * if @free_on_error is true.
2076 */
2077
2078int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
2079{
2080 int err;
2081 int ntail;
2082
2083	/* If the skbuff is non-linear, tailroom is always zero. */
2084 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
2085 memset(skb->data+skb->len, 0, pad);
2086 return 0;
2087 }
2088
2089 ntail = skb->data_len + pad - (skb->end - skb->tail);
2090 if (likely(skb_cloned(skb) || ntail > 0)) {
2091 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
2092 if (unlikely(err))
2093 goto free_skb;
2094 }
2095
2096 /* FIXME: The use of this function with non-linear skb's really needs
2097 * to be audited.
2098 */
2099 err = skb_linearize(skb);
2100 if (unlikely(err))
2101 goto free_skb;
2102
2103 memset(skb->data + skb->len, 0, pad);
2104 return 0;
2105
2106free_skb:
2107 if (free_on_error)
2108 kfree_skb(skb);
2109 return err;
2110}
2111EXPORT_SYMBOL(__skb_pad);
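
/* Example (illustrative sketch): padding a runt frame to the 60-byte
 * Ethernet minimum before transmit, mirroring what skb_put_padto() does.
 * Note that __skb_pad() only zeroes the tailroom; the caller still has to
 * account for the bytes with __skb_put().
 *
 *	if (skb->len < ETH_ZLEN) {
 *		unsigned int pad = ETH_ZLEN - skb->len;
 *
 *		if (__skb_pad(skb, pad, true))
 *			return -ENOMEM;	// skb was already freed
 *		__skb_put(skb, pad);
 *	}
 */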
2112
2113/**
2114 * pskb_put - add data to the tail of a potentially fragmented buffer
2115 * @skb: start of the buffer to use
2116 * @tail: tail fragment of the buffer to use
2117 * @len: amount of data to add
2118 *
2119 * This function extends the used data area of the potentially
2120 * fragmented buffer. @tail must be the last fragment of @skb -- or
2121 * @skb itself. If this would exceed the total buffer size the kernel
2122 * will panic. A pointer to the first byte of the extra data is
2123 * returned.
2124 */
2125
2126void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
2127{
2128 if (tail != skb) {
2129 skb->data_len += len;
2130 skb->len += len;
2131 }
2132 return skb_put(tail, len);
2133}
2134EXPORT_SYMBOL_GPL(pskb_put);
2135
2136/**
2137 * skb_put - add data to a buffer
2138 * @skb: buffer to use
2139 * @len: amount of data to add
2140 *
2141 * This function extends the used data area of the buffer. If this would
2142 * exceed the total buffer size the kernel will panic. A pointer to the
2143 * first byte of the extra data is returned.
2144 */
2145void *skb_put(struct sk_buff *skb, unsigned int len)
2146{
2147 void *tmp = skb_tail_pointer(skb);
2148 SKB_LINEAR_ASSERT(skb);
2149 skb->tail += len;
2150 skb->len += len;
2151 if (unlikely(skb->tail > skb->end))
2152 skb_over_panic(skb, len, __builtin_return_address(0));
2153 return tmp;
2154}
2155EXPORT_SYMBOL(skb_put);
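
/* Example (illustrative sketch) of the classic alloc/reserve/put/push
 * sequence; "payload", "payload_len" and "eth" are assumed.
 *
 *	skb = alloc_skb(ETH_HLEN + payload_len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, ETH_HLEN);			// headroom for the header
 *	skb_put_data(skb, payload, payload_len);	// extend tail, copy data
 *	eth = skb_push(skb, ETH_HLEN);			// prepend into headroom
 */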
2156
2157/**
2158 * skb_push - add data to the start of a buffer
2159 * @skb: buffer to use
2160 * @len: amount of data to add
2161 *
2162 * This function extends the used data area of the buffer at the buffer
2163 * start. If this would exceed the total buffer headroom the kernel will
2164 * panic. A pointer to the first byte of the extra data is returned.
2165 */
2166void *skb_push(struct sk_buff *skb, unsigned int len)
2167{
2168 skb->data -= len;
2169 skb->len += len;
2170 if (unlikely(skb->data < skb->head))
2171 skb_under_panic(skb, len, __builtin_return_address(0));
2172 return skb->data;
2173}
2174EXPORT_SYMBOL(skb_push);
2175
2176/**
2177 * skb_pull - remove data from the start of a buffer
2178 * @skb: buffer to use
2179 * @len: amount of data to remove
2180 *
2181 * This function removes data from the start of a buffer, returning
2182 * the memory to the headroom. A pointer to the next data in the buffer
2183 * is returned. Once the data has been pulled future pushes will overwrite
2184 * the old data.
2185 */
2186void *skb_pull(struct sk_buff *skb, unsigned int len)
2187{
2188 return skb_pull_inline(skb, len);
2189}
2190EXPORT_SYMBOL(skb_pull);
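
/* Example (illustrative sketch): consuming a fixed-size header on receive;
 * "struct myproto_hdr" is assumed. pskb_may_pull() may reallocate the
 * head, so the data pointer is taken only afterwards.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct myproto_hdr)))
 *		goto drop;				// too short
 *	hdr = (struct myproto_hdr *)skb->data;
 *	skb_pull(skb, sizeof(*hdr));			// payload at skb->data now
 */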
2191
2192/**
2193 * skb_pull_data - remove data from the start of a buffer returning its
2194 * original position.
2195 * @skb: buffer to use
2196 * @len: amount of data to remove
2197 *
2198 * This function removes data from the start of a buffer, returning
2199 * the memory to the headroom. A pointer to the original data in the buffer
2200 * is returned after checking if there is enough data to pull. Once the
2201 * data has been pulled future pushes will overwrite the old data.
2202 */
2203void *skb_pull_data(struct sk_buff *skb, size_t len)
2204{
2205 void *data = skb->data;
2206
2207 if (skb->len < len)
2208 return NULL;
2209
2210 skb_pull(skb, len);
2211
2212 return data;
2213}
2214EXPORT_SYMBOL(skb_pull_data);
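
/* Example (illustrative sketch): skb_pull_data() folds the length check
 * and the pull into one call and hands back the bytes just consumed;
 * assumes a linear skb, as in its Bluetooth users. "struct myproto_hdr"
 * is assumed.
 *
 *	struct myproto_hdr *hdr = skb_pull_data(skb, sizeof(*hdr));
 *
 *	if (!hdr)
 *		goto drop;	// fewer than sizeof(*hdr) bytes available
 */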
2215
2216/**
2217 * skb_trim - remove end from a buffer
2218 * @skb: buffer to alter
2219 * @len: new length
2220 *
2221 * Cut the length of a buffer down by removing data from the tail. If
2222 * the buffer is already under the length specified it is not modified.
2223 * The skb must be linear.
2224 */
2225void skb_trim(struct sk_buff *skb, unsigned int len)
2226{
2227 if (skb->len > len)
2228 __skb_trim(skb, len);
2229}
2230EXPORT_SYMBOL(skb_trim);
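
/* Example (illustrative sketch): dropping trailing padding once the real
 * datagram length is known; "real_len" is assumed to come from a protocol
 * header. For a potentially nonlinear or cloned skb use pskb_trim().
 *
 *	if (skb->len > real_len)
 *		skb_trim(skb, real_len);
 */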
2231
2232/* Trims skb to length len. It can change skb pointers.
2233 */
2234
2235int ___pskb_trim(struct sk_buff *skb, unsigned int len)
2236{
2237 struct sk_buff **fragp;
2238 struct sk_buff *frag;
2239 int offset = skb_headlen(skb);
2240 int nfrags = skb_shinfo(skb)->nr_frags;
2241 int i;
2242 int err;
2243
2244 if (skb_cloned(skb) &&
2245 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
2246 return err;
2247
2248 i = 0;
2249 if (offset >= len)
2250 goto drop_pages;
2251
2252 for (; i < nfrags; i++) {
2253 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2254
2255 if (end < len) {
2256 offset = end;
2257 continue;
2258 }
2259
2260 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
2261
2262drop_pages:
2263 skb_shinfo(skb)->nr_frags = i;
2264
2265 for (; i < nfrags; i++)
2266 skb_frag_unref(skb, i);
2267
2268 if (skb_has_frag_list(skb))
2269 skb_drop_fraglist(skb);
2270 goto done;
2271 }
2272
2273 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
2274 fragp = &frag->next) {
2275 int end = offset + frag->len;
2276
2277 if (skb_shared(frag)) {
2278 struct sk_buff *nfrag;
2279
2280 nfrag = skb_clone(frag, GFP_ATOMIC);
2281 if (unlikely(!nfrag))
2282 return -ENOMEM;
2283
2284 nfrag->next = frag->next;
2285 consume_skb(frag);
2286 frag = nfrag;
2287 *fragp = frag;
2288 }
2289
2290 if (end < len) {
2291 offset = end;
2292 continue;
2293 }
2294
2295 if (end > len &&
2296 unlikely((err = pskb_trim(frag, len - offset))))
2297 return err;
2298
2299 if (frag->next)
2300 skb_drop_list(&frag->next);
2301 break;
2302 }
2303
2304done:
2305 if (len > skb_headlen(skb)) {
2306 skb->data_len -= skb->len - len;
2307 skb->len = len;
2308 } else {
2309 skb->len = len;
2310 skb->data_len = 0;
2311 skb_set_tail_pointer(skb, len);
2312 }
2313
2314 if (!skb->sk || skb->destructor == sock_edemux)
2315 skb_condense(skb);
2316 return 0;
2317}
2318EXPORT_SYMBOL(___pskb_trim);
2319
2320/* Note : use pskb_trim_rcsum() instead of calling this directly
2321 */
2322int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2323{
2324 if (skb->ip_summed == CHECKSUM_COMPLETE) {
2325 int delta = skb->len - len;
2326
2327 skb->csum = csum_block_sub(skb->csum,
2328 skb_checksum(skb, len, delta, 0),
2329 len);
2330 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2331 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
2332 int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
2333
2334 if (offset + sizeof(__sum16) > hdlen)
2335 return -EINVAL;
2336 }
2337 return __pskb_trim(skb, len);
2338}
2339EXPORT_SYMBOL(pskb_trim_rcsum_slow);
2340
2341/**
2342 * __pskb_pull_tail - advance tail of skb header
2343 * @skb: buffer to reallocate
2344 * @delta: number of bytes to advance tail
2345 *
2346 * The function makes sense only on a fragmented &sk_buff:
2347 * it expands the header, moving its tail forward and copying the
2348 * necessary data from the fragmented part.
2349 *
2350 * &sk_buff MUST have reference count of 1.
2351 *
2352 * Returns %NULL (and &sk_buff does not change) if pull failed
2353 * or value of new tail of skb in the case of success.
2354 *
2355 * All the pointers pointing into skb header may change and must be
2356 * reloaded after call to this function.
2357 */
2358
2359/* Moves the tail of the skb head forward, copying data from the
2360 * fragmented part when necessary.
2361 * 1. It may fail due to malloc failure.
2362 * 2. It may change skb pointers.
2363 *
2364 * It is pretty complicated. Luckily, it is called only in exceptional cases.
2365 */
2366void *__pskb_pull_tail(struct sk_buff *skb, int delta)
2367{
2368	/* If the skb does not have enough free space at the tail, get a new
2369	 * one plus 128 bytes for future expansions. If we have enough room at
2370	 * the tail, reallocate without expansion only if the skb is cloned.
2371	 */
2372 int i, k, eat = (skb->tail + delta) - skb->end;
2373
2374 if (eat > 0 || skb_cloned(skb)) {
2375 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2376 GFP_ATOMIC))
2377 return NULL;
2378 }
2379
2380 BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2381 skb_tail_pointer(skb), delta));
2382
2383	/* Optimization: no fragments, no reason to pre-estimate the
2384	 * size of pulled pages. Superb.
2385	 */
2386 if (!skb_has_frag_list(skb))
2387 goto pull_pages;
2388
2389 /* Estimate size of pulled pages. */
2390 eat = delta;
2391 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2392 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2393
2394 if (size >= eat)
2395 goto pull_pages;
2396 eat -= size;
2397 }
2398
2399	/* If we need to update the frag list, we are in trouble.
2400	 * Certainly, it is possible to add an offset to the skb data,
2401	 * but taking into account that pulling is expected to
2402	 * be a very rare operation, it is worth fighting against
2403	 * further bloating of the skb head and crucifying ourselves here instead.
2404	 * Pure masochism, indeed. 8)8)
2405	 */
2406 if (eat) {
2407 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2408 struct sk_buff *clone = NULL;
2409 struct sk_buff *insp = NULL;
2410
2411 do {
2412 if (list->len <= eat) {
2413 /* Eaten as whole. */
2414 eat -= list->len;
2415 list = list->next;
2416 insp = list;
2417 } else {
2418 /* Eaten partially. */
2419
2420 if (skb_shared(list)) {
2421 /* Sucks! We need to fork list. :-( */
2422 clone = skb_clone(list, GFP_ATOMIC);
2423 if (!clone)
2424 return NULL;
2425 insp = list->next;
2426 list = clone;
2427 } else {
2428 /* This may be pulled without
2429 * problems. */
2430 insp = list;
2431 }
2432 if (!pskb_pull(list, eat)) {
2433 kfree_skb(clone);
2434 return NULL;
2435 }
2436 break;
2437 }
2438 } while (eat);
2439
2440 /* Free pulled out fragments. */
2441 while ((list = skb_shinfo(skb)->frag_list) != insp) {
2442 skb_shinfo(skb)->frag_list = list->next;
2443 consume_skb(list);
2444 }
2445 /* And insert new clone at head. */
2446 if (clone) {
2447 clone->next = list;
2448 skb_shinfo(skb)->frag_list = clone;
2449 }
2450 }
2451 /* Success! Now we may commit changes to skb data. */
2452
2453pull_pages:
2454 eat = delta;
2455 k = 0;
2456 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2457 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2458
2459 if (size <= eat) {
2460 skb_frag_unref(skb, i);
2461 eat -= size;
2462 } else {
2463 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2464
2465 *frag = skb_shinfo(skb)->frags[i];
2466 if (eat) {
2467 skb_frag_off_add(frag, eat);
2468 skb_frag_size_sub(frag, eat);
2469 if (!i)
2470 goto end;
2471 eat = 0;
2472 }
2473 k++;
2474 }
2475 }
2476 skb_shinfo(skb)->nr_frags = k;
2477
2478end:
2479 skb->tail += delta;
2480 skb->data_len -= delta;
2481
2482 if (!skb->data_len)
2483 skb_zcopy_clear(skb, false);
2484
2485 return skb_tail_pointer(skb);
2486}
2487EXPORT_SYMBOL(__pskb_pull_tail);
2488
2489/**
2490 * skb_copy_bits - copy bits from skb to kernel buffer
2491 * @skb: source skb
2492 * @offset: offset in source
2493 * @to: destination buffer
2494 * @len: number of bytes to copy
2495 *
2496 * Copy the specified number of bytes from the source skb to the
2497 * destination buffer.
2498 *
2499 * CAUTION ! :
2500 * If its prototype is ever changed,
2501 * check arch/{*}/net/{*}.S files,
2502 * since it is called from BPF assembly code.
2503 */
2504int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2505{
2506 int start = skb_headlen(skb);
2507 struct sk_buff *frag_iter;
2508 int i, copy;
2509
2510 if (offset > (int)skb->len - len)
2511 goto fault;
2512
2513 /* Copy header. */
2514 if ((copy = start - offset) > 0) {
2515 if (copy > len)
2516 copy = len;
2517 skb_copy_from_linear_data_offset(skb, offset, to, copy);
2518 if ((len -= copy) == 0)
2519 return 0;
2520 offset += copy;
2521 to += copy;
2522 }
2523
2524 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2525 int end;
2526 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2527
2528 WARN_ON(start > offset + len);
2529
2530 end = start + skb_frag_size(f);
2531 if ((copy = end - offset) > 0) {
2532 u32 p_off, p_len, copied;
2533 struct page *p;
2534 u8 *vaddr;
2535
2536 if (copy > len)
2537 copy = len;
2538
2539 skb_frag_foreach_page(f,
2540 skb_frag_off(f) + offset - start,
2541 copy, p, p_off, p_len, copied) {
2542 vaddr = kmap_atomic(p);
2543 memcpy(to + copied, vaddr + p_off, p_len);
2544 kunmap_atomic(vaddr);
2545 }
2546
2547 if ((len -= copy) == 0)
2548 return 0;
2549 offset += copy;
2550 to += copy;
2551 }
2552 start = end;
2553 }
2554
2555 skb_walk_frags(skb, frag_iter) {
2556 int end;
2557
2558 WARN_ON(start > offset + len);
2559
2560 end = start + frag_iter->len;
2561 if ((copy = end - offset) > 0) {
2562 if (copy > len)
2563 copy = len;
2564 if (skb_copy_bits(frag_iter, offset - start, to, copy))
2565 goto fault;
2566 if ((len -= copy) == 0)
2567 return 0;
2568 offset += copy;
2569 to += copy;
2570 }
2571 start = end;
2572 }
2573
2574 if (!len)
2575 return 0;
2576
2577fault:
2578 return -EFAULT;
2579}
2580EXPORT_SYMBOL(skb_copy_bits);
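
/* Example (illustrative sketch): copying a possibly-fragmented header into
 * a stack buffer without linearizing; "struct myproto_hdr" is assumed.
 *
 *	struct myproto_hdr hdr;
 *
 *	if (skb_copy_bits(skb, 0, &hdr, sizeof(hdr)) < 0)
 *		goto drop;	// skb shorter than sizeof(hdr)
 */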
2581
2582/*
2583 * Callback from splice_to_pipe(), if we need to release some pages
2584 * at the end of the spd in case we errored out while filling the pipe.
2585 */
2586static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2587{
2588 put_page(spd->pages[i]);
2589}
2590
2591static struct page *linear_to_page(struct page *page, unsigned int *len,
2592 unsigned int *offset,
2593 struct sock *sk)
2594{
2595 struct page_frag *pfrag = sk_page_frag(sk);
2596
2597 if (!sk_page_frag_refill(sk, pfrag))
2598 return NULL;
2599
2600 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2601
2602 memcpy(page_address(pfrag->page) + pfrag->offset,
2603 page_address(page) + *offset, *len);
2604 *offset = pfrag->offset;
2605 pfrag->offset += *len;
2606
2607 return pfrag->page;
2608}
2609
2610static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2611 struct page *page,
2612 unsigned int offset)
2613{
2614 return spd->nr_pages &&
2615 spd->pages[spd->nr_pages - 1] == page &&
2616 (spd->partial[spd->nr_pages - 1].offset +
2617 spd->partial[spd->nr_pages - 1].len == offset);
2618}
2619
2620/*
2621 * Fill page/offset/length into spd, if it can hold more pages.
2622 */
2623static bool spd_fill_page(struct splice_pipe_desc *spd,
2624 struct pipe_inode_info *pipe, struct page *page,
2625 unsigned int *len, unsigned int offset,
2626 bool linear,
2627 struct sock *sk)
2628{
2629 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2630 return true;
2631
2632 if (linear) {
2633 page = linear_to_page(page, len, &offset, sk);
2634 if (!page)
2635 return true;
2636 }
2637 if (spd_can_coalesce(spd, page, offset)) {
2638 spd->partial[spd->nr_pages - 1].len += *len;
2639 return false;
2640 }
2641 get_page(page);
2642 spd->pages[spd->nr_pages] = page;
2643 spd->partial[spd->nr_pages].len = *len;
2644 spd->partial[spd->nr_pages].offset = offset;
2645 spd->nr_pages++;
2646
2647 return false;
2648}
2649
2650static bool __splice_segment(struct page *page, unsigned int poff,
2651 unsigned int plen, unsigned int *off,
2652 unsigned int *len,
2653 struct splice_pipe_desc *spd, bool linear,
2654 struct sock *sk,
2655 struct pipe_inode_info *pipe)
2656{
2657 if (!*len)
2658 return true;
2659
2660 /* skip this segment if already processed */
2661 if (*off >= plen) {
2662 *off -= plen;
2663 return false;
2664 }
2665
2666 /* ignore any bits we already processed */
2667 poff += *off;
2668 plen -= *off;
2669 *off = 0;
2670
2671 do {
2672 unsigned int flen = min(*len, plen);
2673
2674 if (spd_fill_page(spd, pipe, page, &flen, poff,
2675 linear, sk))
2676 return true;
2677 poff += flen;
2678 plen -= flen;
2679 *len -= flen;
2680 } while (*len && plen);
2681
2682 return false;
2683}
2684
2685/*
2686 * Map linear and fragment data from the skb to spd. It reports true if the
2687 * pipe is full or if we already spliced the requested length.
2688 */
2689static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2690 unsigned int *offset, unsigned int *len,
2691 struct splice_pipe_desc *spd, struct sock *sk)
2692{
2693 int seg;
2694 struct sk_buff *iter;
2695
2696	/* map the linear part:
2697 * If skb->head_frag is set, this 'linear' part is backed by a
2698 * fragment, and if the head is not shared with any clones then
2699 * we can avoid a copy since we own the head portion of this page.
2700 */
2701 if (__splice_segment(virt_to_page(skb->data),
2702 (unsigned long) skb->data & (PAGE_SIZE - 1),
2703 skb_headlen(skb),
2704 offset, len, spd,
2705 skb_head_is_locked(skb),
2706 sk, pipe))
2707 return true;
2708
2709 /*
2710 * then map the fragments
2711 */
2712 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2713 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2714
2715 if (__splice_segment(skb_frag_page(f),
2716 skb_frag_off(f), skb_frag_size(f),
2717 offset, len, spd, false, sk, pipe))
2718 return true;
2719 }
2720
2721 skb_walk_frags(skb, iter) {
2722 if (*offset >= iter->len) {
2723 *offset -= iter->len;
2724 continue;
2725 }
2726 /* __skb_splice_bits() only fails if the output has no room
2727 * left, so no point in going over the frag_list for the error
2728 * case.
2729 */
2730 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2731 return true;
2732 }
2733
2734 return false;
2735}
2736
2737/*
2738 * Map data from the skb to a pipe. Should handle both the linear part,
2739 * the fragments, and the frag list.
2740 */
2741int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2742 struct pipe_inode_info *pipe, unsigned int tlen,
2743 unsigned int flags)
2744{
2745 struct partial_page partial[MAX_SKB_FRAGS];
2746 struct page *pages[MAX_SKB_FRAGS];
2747 struct splice_pipe_desc spd = {
2748 .pages = pages,
2749 .partial = partial,
2750 .nr_pages_max = MAX_SKB_FRAGS,
2751 .ops = &nosteal_pipe_buf_ops,
2752 .spd_release = sock_spd_release,
2753 };
2754 int ret = 0;
2755
2756 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2757
2758 if (spd.nr_pages)
2759 ret = splice_to_pipe(pipe, &spd);
2760
2761 return ret;
2762}
2763EXPORT_SYMBOL_GPL(skb_splice_bits);
2764
2765static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg,
2766 struct kvec *vec, size_t num, size_t size)
2767{
2768 struct socket *sock = sk->sk_socket;
2769
2770 if (!sock)
2771 return -EINVAL;
2772 return kernel_sendmsg(sock, msg, vec, num, size);
2773}
2774
2775static int sendpage_unlocked(struct sock *sk, struct page *page, int offset,
2776 size_t size, int flags)
2777{
2778 struct socket *sock = sk->sk_socket;
2779
2780 if (!sock)
2781 return -EINVAL;
2782 return kernel_sendpage(sock, page, offset, size, flags);
2783}
2784
2785typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg,
2786 struct kvec *vec, size_t num, size_t size);
2787typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset,
2788 size_t size, int flags);
2789static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
2790 int len, sendmsg_func sendmsg, sendpage_func sendpage)
2791{
2792 unsigned int orig_len = len;
2793 struct sk_buff *head = skb;
2794 unsigned short fragidx;
2795 int slen, ret;
2796
2797do_frag_list:
2798
2799 /* Deal with head data */
2800 while (offset < skb_headlen(skb) && len) {
2801 struct kvec kv;
2802 struct msghdr msg;
2803
2804 slen = min_t(int, len, skb_headlen(skb) - offset);
2805 kv.iov_base = skb->data + offset;
2806 kv.iov_len = slen;
2807 memset(&msg, 0, sizeof(msg));
2808 msg.msg_flags = MSG_DONTWAIT;
2809
2810 ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked,
2811 sendmsg_unlocked, sk, &msg, &kv, 1, slen);
2812 if (ret <= 0)
2813 goto error;
2814
2815 offset += ret;
2816 len -= ret;
2817 }
2818
2819 /* All the data was skb head? */
2820 if (!len)
2821 goto out;
2822
2823 /* Make offset relative to start of frags */
2824 offset -= skb_headlen(skb);
2825
2826 /* Find where we are in frag list */
2827 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2828 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2829
2830 if (offset < skb_frag_size(frag))
2831 break;
2832
2833 offset -= skb_frag_size(frag);
2834 }
2835
2836 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2837 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2838
2839 slen = min_t(size_t, len, skb_frag_size(frag) - offset);
2840
2841 while (slen) {
2842 ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked,
2843 sendpage_unlocked, sk,
2844 skb_frag_page(frag),
2845 skb_frag_off(frag) + offset,
2846 slen, MSG_DONTWAIT);
2847 if (ret <= 0)
2848 goto error;
2849
2850 len -= ret;
2851 offset += ret;
2852 slen -= ret;
2853 }
2854
2855 offset = 0;
2856 }
2857
2858 if (len) {
2859 /* Process any frag lists */
2860
2861 if (skb == head) {
2862 if (skb_has_frag_list(skb)) {
2863 skb = skb_shinfo(skb)->frag_list;
2864 goto do_frag_list;
2865 }
2866 } else if (skb->next) {
2867 skb = skb->next;
2868 goto do_frag_list;
2869 }
2870 }
2871
2872out:
2873 return orig_len - len;
2874
2875error:
2876 return orig_len == len ? ret : orig_len - len;
2877}
2878
2879/* Send skb data on a socket. Socket must be locked. */
2880int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2881 int len)
2882{
2883 return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked,
2884 kernel_sendpage_locked);
2885}
2886EXPORT_SYMBOL_GPL(skb_send_sock_locked);
2887
2888/* Send skb data on a socket. Socket must be unlocked. */
2889int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
2890{
2891 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked,
2892 sendpage_unlocked);
2893}
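
/* Example (illustrative sketch): pushing an skb's payload through an
 * unlocked kernel socket; "requeue_partial()" is a hypothetical helper for
 * short sends.
 *
 *	int sent = skb_send_sock(sk, skb, 0, skb->len);
 *
 *	if (sent < 0)
 *		return sent;			// nothing was sent
 *	if (sent < skb->len)
 *		requeue_partial(skb, sent);	// resume later at "sent"
 */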
2894
2895/**
2896 * skb_store_bits - store bits from kernel buffer to skb
2897 * @skb: destination buffer
2898 * @offset: offset in destination
2899 * @from: source buffer
2900 * @len: number of bytes to copy
2901 *
2902 * Copy the specified number of bytes from the source buffer to the
2903 * destination skb. This function handles all the messy bits of
2904 * traversing fragment lists and such.
2905 */
2906
2907int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2908{
2909 int start = skb_headlen(skb);
2910 struct sk_buff *frag_iter;
2911 int i, copy;
2912
2913 if (offset > (int)skb->len - len)
2914 goto fault;
2915
2916 if ((copy = start - offset) > 0) {
2917 if (copy > len)
2918 copy = len;
2919 skb_copy_to_linear_data_offset(skb, offset, from, copy);
2920 if ((len -= copy) == 0)
2921 return 0;
2922 offset += copy;
2923 from += copy;
2924 }
2925
2926 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2927 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2928 int end;
2929
2930 WARN_ON(start > offset + len);
2931
2932 end = start + skb_frag_size(frag);
2933 if ((copy = end - offset) > 0) {
2934 u32 p_off, p_len, copied;
2935 struct page *p;
2936 u8 *vaddr;
2937
2938 if (copy > len)
2939 copy = len;
2940
2941 skb_frag_foreach_page(frag,
2942 skb_frag_off(frag) + offset - start,
2943 copy, p, p_off, p_len, copied) {
2944 vaddr = kmap_atomic(p);
2945 memcpy(vaddr + p_off, from + copied, p_len);
2946 kunmap_atomic(vaddr);
2947 }
2948
2949 if ((len -= copy) == 0)
2950 return 0;
2951 offset += copy;
2952 from += copy;
2953 }
2954 start = end;
2955 }
2956
2957 skb_walk_frags(skb, frag_iter) {
2958 int end;
2959
2960 WARN_ON(start > offset + len);
2961
2962 end = start + frag_iter->len;
2963 if ((copy = end - offset) > 0) {
2964 if (copy > len)
2965 copy = len;
2966 if (skb_store_bits(frag_iter, offset - start,
2967 from, copy))
2968 goto fault;
2969 if ((len -= copy) == 0)
2970 return 0;
2971 offset += copy;
2972 from += copy;
2973 }
2974 start = end;
2975 }
2976 if (!len)
2977 return 0;
2978
2979fault:
2980 return -EFAULT;
2981}
2982EXPORT_SYMBOL(skb_store_bits);
2983
2984/* Checksum skb data. */
2985__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2986 __wsum csum, const struct skb_checksum_ops *ops)
2987{
2988 int start = skb_headlen(skb);
2989 int i, copy = start - offset;
2990 struct sk_buff *frag_iter;
2991 int pos = 0;
2992
2993 /* Checksum header. */
2994 if (copy > 0) {
2995 if (copy > len)
2996 copy = len;
2997 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
2998 skb->data + offset, copy, csum);
2999 if ((len -= copy) == 0)
3000 return csum;
3001 offset += copy;
3002 pos = copy;
3003 }
3004
3005 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3006 int end;
3007 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3008
3009 WARN_ON(start > offset + len);
3010
3011 end = start + skb_frag_size(frag);
3012 if ((copy = end - offset) > 0) {
3013 u32 p_off, p_len, copied;
3014 struct page *p;
3015 __wsum csum2;
3016 u8 *vaddr;
3017
3018 if (copy > len)
3019 copy = len;
3020
3021 skb_frag_foreach_page(frag,
3022 skb_frag_off(frag) + offset - start,
3023 copy, p, p_off, p_len, copied) {
3024 vaddr = kmap_atomic(p);
3025 csum2 = INDIRECT_CALL_1(ops->update,
3026 csum_partial_ext,
3027 vaddr + p_off, p_len, 0);
3028 kunmap_atomic(vaddr);
3029 csum = INDIRECT_CALL_1(ops->combine,
3030 csum_block_add_ext, csum,
3031 csum2, pos, p_len);
3032 pos += p_len;
3033 }
3034
3035 if (!(len -= copy))
3036 return csum;
3037 offset += copy;
3038 }
3039 start = end;
3040 }
3041
3042 skb_walk_frags(skb, frag_iter) {
3043 int end;
3044
3045 WARN_ON(start > offset + len);
3046
3047 end = start + frag_iter->len;
3048 if ((copy = end - offset) > 0) {
3049 __wsum csum2;
3050 if (copy > len)
3051 copy = len;
3052 csum2 = __skb_checksum(frag_iter, offset - start,
3053 copy, 0, ops);
3054 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
3055 csum, csum2, pos, copy);
3056 if ((len -= copy) == 0)
3057 return csum;
3058 offset += copy;
3059 pos += copy;
3060 }
3061 start = end;
3062 }
3063 BUG_ON(len);
3064
3065 return csum;
3066}
3067EXPORT_SYMBOL(__skb_checksum);
3068
3069__wsum skb_checksum(const struct sk_buff *skb, int offset,
3070 int len, __wsum csum)
3071{
3072 const struct skb_checksum_ops ops = {
3073 .update = csum_partial_ext,
3074 .combine = csum_block_add_ext,
3075 };
3076
3077 return __skb_checksum(skb, offset, len, csum, &ops);
3078}
3079EXPORT_SYMBOL(skb_checksum);
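
/* Example (illustrative sketch): checksumming the transport payload and
 * folding it to the final 16-bit form; "thoff" is an assumed transport
 * header offset.
 *
 *	__wsum csum = skb_checksum(skb, thoff, skb->len - thoff, 0);
 *	__sum16 folded = csum_fold(csum);
 */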
3080
3081/* Both of the above in one bottle. */
3082
3083__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
3084 u8 *to, int len)
3085{
3086 int start = skb_headlen(skb);
3087 int i, copy = start - offset;
3088 struct sk_buff *frag_iter;
3089 int pos = 0;
3090 __wsum csum = 0;
3091
3092 /* Copy header. */
3093 if (copy > 0) {
3094 if (copy > len)
3095 copy = len;
3096 csum = csum_partial_copy_nocheck(skb->data + offset, to,
3097 copy);
3098 if ((len -= copy) == 0)
3099 return csum;
3100 offset += copy;
3101 to += copy;
3102 pos = copy;
3103 }
3104
3105 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3106 int end;
3107
3108 WARN_ON(start > offset + len);
3109
3110 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3111 if ((copy = end - offset) > 0) {
3112 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3113 u32 p_off, p_len, copied;
3114 struct page *p;
3115 __wsum csum2;
3116 u8 *vaddr;
3117
3118 if (copy > len)
3119 copy = len;
3120
3121 skb_frag_foreach_page(frag,
3122 skb_frag_off(frag) + offset - start,
3123 copy, p, p_off, p_len, copied) {
3124 vaddr = kmap_atomic(p);
3125 csum2 = csum_partial_copy_nocheck(vaddr + p_off,
3126 to + copied,
3127 p_len);
3128 kunmap_atomic(vaddr);
3129 csum = csum_block_add(csum, csum2, pos);
3130 pos += p_len;
3131 }
3132
3133 if (!(len -= copy))
3134 return csum;
3135 offset += copy;
3136 to += copy;
3137 }
3138 start = end;
3139 }
3140
3141 skb_walk_frags(skb, frag_iter) {
3142 __wsum csum2;
3143 int end;
3144
3145 WARN_ON(start > offset + len);
3146
3147 end = start + frag_iter->len;
3148 if ((copy = end - offset) > 0) {
3149 if (copy > len)
3150 copy = len;
3151 csum2 = skb_copy_and_csum_bits(frag_iter,
3152 offset - start,
3153 to, copy);
3154 csum = csum_block_add(csum, csum2, pos);
3155 if ((len -= copy) == 0)
3156 return csum;
3157 offset += copy;
3158 to += copy;
3159 pos += copy;
3160 }
3161 start = end;
3162 }
3163 BUG_ON(len);
3164 return csum;
3165}
3166EXPORT_SYMBOL(skb_copy_and_csum_bits);
3167
3168__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
3169{
3170 __sum16 sum;
3171
3172 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
3173 /* See comments in __skb_checksum_complete(). */
3174 if (likely(!sum)) {
3175 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3176 !skb->csum_complete_sw)
3177 netdev_rx_csum_fault(skb->dev, skb);
3178 }
3179 if (!skb_shared(skb))
3180 skb->csum_valid = !sum;
3181 return sum;
3182}
3183EXPORT_SYMBOL(__skb_checksum_complete_head);
3184
3185/* This function assumes skb->csum already holds pseudo header's checksum,
3186 * which has been changed from the hardware checksum, for example, by
3187 * __skb_checksum_validate_complete(). Also, the original skb->csum must
3188 * have failed validation for the CHECKSUM_COMPLETE case.
3189 *
3190 * It returns non-zero if the recomputed checksum is still invalid, otherwise
3191 * zero. The new checksum is stored back into skb->csum unless the skb is
3192 * shared.
3193 */
3194__sum16 __skb_checksum_complete(struct sk_buff *skb)
3195{
3196 __wsum csum;
3197 __sum16 sum;
3198
3199 csum = skb_checksum(skb, 0, skb->len, 0);
3200
3201 sum = csum_fold(csum_add(skb->csum, csum));
3202 /* This check is inverted, because we already knew the hardware
3203 * checksum is invalid before calling this function. So, if the
3204 * re-computed checksum is valid instead, then we have a mismatch
3205 * between the original skb->csum and skb_checksum(). This means either
3206 * the original hardware checksum is incorrect or we screw up skb->csum
3207 * when moving skb->data around.
3208 */
3209 if (likely(!sum)) {
3210 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3211 !skb->csum_complete_sw)
3212 netdev_rx_csum_fault(skb->dev, skb);
3213 }
3214
3215 if (!skb_shared(skb)) {
3216 /* Save full packet checksum */
3217 skb->csum = csum;
3218 skb->ip_summed = CHECKSUM_COMPLETE;
3219 skb->csum_complete_sw = 1;
3220 skb->csum_valid = !sum;
3221 }
3222
3223 return sum;
3224}
3225EXPORT_SYMBOL(__skb_checksum_complete);
3226
3227static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
3228{
3229 net_warn_ratelimited(
3230 "%s: attempt to compute crc32c without libcrc32c.ko\n",
3231 __func__);
3232 return 0;
3233}
3234
3235static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
3236 int offset, int len)
3237{
3238 net_warn_ratelimited(
3239 "%s: attempt to compute crc32c without libcrc32c.ko\n",
3240 __func__);
3241 return 0;
3242}
3243
3244static const struct skb_checksum_ops default_crc32c_ops = {
3245 .update = warn_crc32c_csum_update,
3246 .combine = warn_crc32c_csum_combine,
3247};
3248
3249const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
3250 &default_crc32c_ops;
3251EXPORT_SYMBOL(crc32c_csum_stub);
3252
3253/**
3254 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
3255 * @from: source buffer
3256 *
3257 * Calculates the amount of linear headroom needed in the 'to' skb passed
3258 * into skb_zerocopy().
3259 */
3260unsigned int
3261skb_zerocopy_headlen(const struct sk_buff *from)
3262{
3263 unsigned int hlen = 0;
3264
3265 if (!from->head_frag ||
3266 skb_headlen(from) < L1_CACHE_BYTES ||
3267 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
3268 hlen = skb_headlen(from);
3269 if (!hlen)
3270 hlen = from->len;
3271 }
3272
3273 if (skb_has_frag_list(from))
3274 hlen = from->len;
3275
3276 return hlen;
3277}
3278EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
3279
3280/**
3281 * skb_zerocopy - Zero copy skb to skb
3282 * @to: destination buffer
3283 * @from: source buffer
3284 * @len: number of bytes to copy from source buffer
3285 * @hlen: size of linear headroom in destination buffer
3286 *
3287 * Copies up to `len` bytes from `from` to `to` by creating references
3288 * to the frags in the source buffer.
3289 *
3290 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
3291 * headroom in the `to` buffer.
3292 *
3293 * Return value:
3294 * 0: everything is OK
3295 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
3296 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3297 */
3298int
3299skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
3300{
3301 int i, j = 0;
3302 int plen = 0; /* length of skb->head fragment */
3303 int ret;
3304 struct page *page;
3305 unsigned int offset;
3306
3307 BUG_ON(!from->head_frag && !hlen);
3308
3309	/* don't bother with small payloads */
3310 if (len <= skb_tailroom(to))
3311 return skb_copy_bits(from, 0, skb_put(to, len), len);
3312
3313 if (hlen) {
3314 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
3315 if (unlikely(ret))
3316 return ret;
3317 len -= hlen;
3318 } else {
3319 plen = min_t(int, skb_headlen(from), len);
3320 if (plen) {
3321 page = virt_to_head_page(from->head);
3322 offset = from->data - (unsigned char *)page_address(page);
3323 __skb_fill_page_desc(to, 0, page, offset, plen);
3324 get_page(page);
3325 j = 1;
3326 len -= plen;
3327 }
3328 }
3329
3330 skb_len_add(to, len + plen);
3331
3332 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
3333 skb_tx_error(from);
3334 return -ENOMEM;
3335 }
3336 skb_zerocopy_clone(to, from, GFP_ATOMIC);
3337
3338 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
3339 int size;
3340
3341 if (!len)
3342 break;
3343 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
3344 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
3345 len);
3346 skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
3347 len -= size;
3348 skb_frag_ref(to, j);
3349 j++;
3350 }
3351 skb_shinfo(to)->nr_frags = j;
3352
3353 return 0;
3354}
3355EXPORT_SYMBOL_GPL(skb_zerocopy);
3356
3357void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
3358{
3359 __wsum csum;
3360 long csstart;
3361
3362 if (skb->ip_summed == CHECKSUM_PARTIAL)
3363 csstart = skb_checksum_start_offset(skb);
3364 else
3365 csstart = skb_headlen(skb);
3366
3367 BUG_ON(csstart > skb_headlen(skb));
3368
3369 skb_copy_from_linear_data(skb, to, csstart);
3370
3371 csum = 0;
3372 if (csstart != skb->len)
3373 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
3374 skb->len - csstart);
3375
3376 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3377 long csstuff = csstart + skb->csum_offset;
3378
3379 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
3380 }
3381}
3382EXPORT_SYMBOL(skb_copy_and_csum_dev);
3383
3384/**
3385 * skb_dequeue - remove from the head of the queue
3386 * @list: list to dequeue from
3387 *
3388 * Remove the head of the list. The list lock is taken so the function
3389 * may be used safely with other locking list functions. The head item is
3390 * returned or %NULL if the list is empty.
3391 */
3392
3393struct sk_buff *skb_dequeue(struct sk_buff_head *list)
3394{
3395 unsigned long flags;
3396 struct sk_buff *result;
3397
3398 spin_lock_irqsave(&list->lock, flags);
3399 result = __skb_dequeue(list);
3400 spin_unlock_irqrestore(&list->lock, flags);
3401 return result;
3402}
3403EXPORT_SYMBOL(skb_dequeue);
3404
3405/**
3406 * skb_dequeue_tail - remove from the tail of the queue
3407 * @list: list to dequeue from
3408 *
3409 * Remove the tail of the list. The list lock is taken so the function
3410 * may be used safely with other locking list functions. The tail item is
3411 * returned or %NULL if the list is empty.
3412 */
3413struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
3414{
3415 unsigned long flags;
3416 struct sk_buff *result;
3417
3418 spin_lock_irqsave(&list->lock, flags);
3419 result = __skb_dequeue_tail(list);
3420 spin_unlock_irqrestore(&list->lock, flags);
3421 return result;
3422}
3423EXPORT_SYMBOL(skb_dequeue_tail);
3424
3425/**
3426 * skb_queue_purge - empty a list
3427 * @list: list to empty
3428 *
3429 * Delete all buffers on an &sk_buff list. Each buffer is removed from
3430 * the list and one reference dropped. This function takes the list
3431 * lock and is atomic with respect to other list locking functions.
3432 */
3433void skb_queue_purge(struct sk_buff_head *list)
3434{
3435 struct sk_buff *skb;
3436 while ((skb = skb_dequeue(list)) != NULL)
3437 kfree_skb(skb);
3438}
3439EXPORT_SYMBOL(skb_queue_purge);
3440
3441/**
3442 * skb_rbtree_purge - empty a skb rbtree
3443 * @root: root of the rbtree to empty
3444 * Return value: the sum of truesizes of all purged skbs.
3445 *
3446 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3447 * the list and one reference dropped. This function does not take
3448 * any lock. Synchronization should be handled by the caller (e.g., TCP
3449 * out-of-order queue is protected by the socket lock).
3450 */
3451unsigned int skb_rbtree_purge(struct rb_root *root)
3452{
3453 struct rb_node *p = rb_first(root);
3454 unsigned int sum = 0;
3455
3456 while (p) {
3457 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3458
3459 p = rb_next(p);
3460 rb_erase(&skb->rbnode, root);
3461 sum += skb->truesize;
3462 kfree_skb(skb);
3463 }
3464 return sum;
3465}
3466
3467/**
3468 * skb_queue_head - queue a buffer at the list head
3469 * @list: list to use
3470 * @newsk: buffer to queue
3471 *
3472 * Queue a buffer at the start of the list. This function takes the
3473 * list lock and can be used safely with other locking &sk_buff
3474 * functions.
3475 *
3476 * A buffer cannot be placed on two lists at the same time.
3477 */
3478void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
3479{
3480 unsigned long flags;
3481
3482 spin_lock_irqsave(&list->lock, flags);
3483 __skb_queue_head(list, newsk);
3484 spin_unlock_irqrestore(&list->lock, flags);
3485}
3486EXPORT_SYMBOL(skb_queue_head);
3487
3488/**
3489 * skb_queue_tail - queue a buffer at the list tail
3490 * @list: list to use
3491 * @newsk: buffer to queue
3492 *
3493 * Queue a buffer at the tail of the list. This function takes the
3494 * list lock and can be used safely with other locking &sk_buff
3495 * functions.
3496 *
3497 * A buffer cannot be placed on two lists at the same time.
3498 */
3499void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
3500{
3501 unsigned long flags;
3502
3503 spin_lock_irqsave(&list->lock, flags);
3504 __skb_queue_tail(list, newsk);
3505 spin_unlock_irqrestore(&list->lock, flags);
3506}
3507EXPORT_SYMBOL(skb_queue_tail);
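
/* Example (illustrative sketch): the usual producer/consumer pairing on a
 * private queue initialized with skb_queue_head_init(); "process()" is
 * assumed. Both helpers take q.lock, so no extra locking is needed here.
 *
 *	skb_queue_tail(&q, skb);		// producer
 *
 *	while ((skb = skb_dequeue(&q)))		// consumer
 *		process(skb);
 */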
3508
3509/**
3510 * skb_unlink - remove a buffer from a list
3511 * @skb: buffer to remove
3512 * @list: list to use
3513 *
3514 * Remove a packet from a list. The list locks are taken and this
3515 * function is atomic with respect to other list locked calls.
3516 *
3517 * You must know what list the SKB is on.
3518 */
3519void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
3520{
3521 unsigned long flags;
3522
3523 spin_lock_irqsave(&list->lock, flags);
3524 __skb_unlink(skb, list);
3525 spin_unlock_irqrestore(&list->lock, flags);
3526}
3527EXPORT_SYMBOL(skb_unlink);
3528
3529/**
3530 * skb_append - append a buffer
3531 * @old: buffer to insert after
3532 * @newsk: buffer to insert
3533 * @list: list to use
3534 *
3535 * Place a packet after a given packet in a list. The list locks are taken
3536 * and this function is atomic with respect to other list locked calls.
3537 * A buffer cannot be placed on two lists at the same time.
3538 */
3539void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
3540{
3541 unsigned long flags;
3542
3543 spin_lock_irqsave(&list->lock, flags);
3544 __skb_queue_after(list, old, newsk);
3545 spin_unlock_irqrestore(&list->lock, flags);
3546}
3547EXPORT_SYMBOL(skb_append);
3548
3549static inline void skb_split_inside_header(struct sk_buff *skb,
3550 struct sk_buff* skb1,
3551 const u32 len, const int pos)
3552{
3553 int i;
3554
3555 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3556 pos - len);
3557 /* And move data appendix as is. */
3558 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
3559 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
3560
3561 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
3562 skb_shinfo(skb)->nr_frags = 0;
3563 skb1->data_len = skb->data_len;
3564 skb1->len += skb1->data_len;
3565 skb->data_len = 0;
3566 skb->len = len;
3567 skb_set_tail_pointer(skb, len);
3568}
3569
3570static inline void skb_split_no_header(struct sk_buff *skb,
3571 struct sk_buff* skb1,
3572 const u32 len, int pos)
3573{
3574 int i, k = 0;
3575 const int nfrags = skb_shinfo(skb)->nr_frags;
3576
3577 skb_shinfo(skb)->nr_frags = 0;
3578 skb1->len = skb1->data_len = skb->len - len;
3579 skb->len = len;
3580 skb->data_len = len - pos;
3581
3582 for (i = 0; i < nfrags; i++) {
3583 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3584
3585 if (pos + size > len) {
3586 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3587
3588 if (pos < len) {
3589 /* Split frag.
3590 * We have two variants in this case:
3591 * 1. Move all the frag to the second
3592				 * part, if it is possible. E.g.
3593 * this approach is mandatory for TUX,
3594 * where splitting is expensive.
3595				 * 2. Split accurately. This is what we do.
3596 */
3597 skb_frag_ref(skb, i);
3598 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
3599 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3600 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3601 skb_shinfo(skb)->nr_frags++;
3602 }
3603 k++;
3604 } else
3605 skb_shinfo(skb)->nr_frags++;
3606 pos += size;
3607 }
3608 skb_shinfo(skb1)->nr_frags = k;
3609}
3610
3611/**
3612 * skb_split - Split fragmented skb to two parts at length len.
3613 * @skb: the buffer to split
3614 * @skb1: the buffer to receive the second part
3615 * @len: new length for skb
3616 */
3617void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3618{
3619 int pos = skb_headlen(skb);
3620 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
3621
3622 skb_zcopy_downgrade_managed(skb);
3623
3624 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
3625 skb_zerocopy_clone(skb1, skb, 0);
3626 if (len < pos) /* Split line is inside header. */
3627 skb_split_inside_header(skb, skb1, len, pos);
3628 else /* Second chunk has no header, nothing to copy. */
3629 skb_split_no_header(skb, skb1, len, pos);
3630}
3631EXPORT_SYMBOL(skb_split);
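
/* Example (illustrative sketch): splitting at an MSS boundary, roughly as
 * TCP segmentation does; "mss" is assumed, and skb1 must be a fresh skb
 * with enough tailroom for any copied linear bytes.
 *
 *	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *
 *	if (!skb1)
 *		return -ENOMEM;
 *	skb_split(skb, skb1, mss);	// skb keeps the first mss bytes
 */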
3632
3633/* Shifting from/to a cloned skb is a no-go.
3634 *
3635 * Caller cannot keep skb_shinfo related pointers past calling here!
3636 */
3637static int skb_prepare_for_shift(struct sk_buff *skb)
3638{
3639 return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
3640}
3641
3642/**
3643 * skb_shift - Shifts paged data partially from skb to another
3644 * @tgt: buffer into which tail data gets added
3645 * @skb: buffer from which the paged data comes from
3646 * @shiftlen: shift up to this many bytes
3647 *
3648 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3649 * the length of the skb, from skb to tgt. Returns the number of bytes
3650 * shifted. It's up to the caller to free skb if everything was shifted.
3651 *
3652 * If @tgt runs out of frags, the whole operation is aborted.
3653 *
3654 * The skb can contain nothing but paged data, while tgt is allowed
3655 * to have non-paged data as well.
3656 *
3657 * TODO: full sized shift could be optimized but that would need
3658 * specialized skb free'er to handle frags without up-to-date nr_frags.
3659 */
3660int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3661{
3662 int from, to, merge, todo;
3663 skb_frag_t *fragfrom, *fragto;
3664
3665 BUG_ON(shiftlen > skb->len);
3666
3667 if (skb_headlen(skb))
3668 return 0;
3669 if (skb_zcopy(tgt) || skb_zcopy(skb))
3670 return 0;
3671
3672 todo = shiftlen;
3673 from = 0;
3674 to = skb_shinfo(tgt)->nr_frags;
3675 fragfrom = &skb_shinfo(skb)->frags[from];
3676
3677 /* Actual merge is delayed until the point when we know we can
3678	 * commit all, so that we don't have to undo partial changes.
3679 */
3680 if (!to ||
3681 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3682 skb_frag_off(fragfrom))) {
3683 merge = -1;
3684 } else {
3685 merge = to - 1;
3686
3687 todo -= skb_frag_size(fragfrom);
3688 if (todo < 0) {
3689 if (skb_prepare_for_shift(skb) ||
3690 skb_prepare_for_shift(tgt))
3691 return 0;
3692
3693 /* All previous frag pointers might be stale! */
3694 fragfrom = &skb_shinfo(skb)->frags[from];
3695 fragto = &skb_shinfo(tgt)->frags[merge];
3696
3697 skb_frag_size_add(fragto, shiftlen);
3698 skb_frag_size_sub(fragfrom, shiftlen);
3699 skb_frag_off_add(fragfrom, shiftlen);
3700
3701 goto onlymerged;
3702 }
3703
3704 from++;
3705 }
3706
3707 /* Skip full, not-fitting skb to avoid expensive operations */
3708 if ((shiftlen == skb->len) &&
3709 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3710 return 0;
3711
3712 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3713 return 0;
3714
3715 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3716 if (to == MAX_SKB_FRAGS)
3717 return 0;
3718
3719 fragfrom = &skb_shinfo(skb)->frags[from];
3720 fragto = &skb_shinfo(tgt)->frags[to];
3721
3722 if (todo >= skb_frag_size(fragfrom)) {
3723 *fragto = *fragfrom;
3724 todo -= skb_frag_size(fragfrom);
3725 from++;
3726 to++;
3727
3728 } else {
3729 __skb_frag_ref(fragfrom);
3730 skb_frag_page_copy(fragto, fragfrom);
3731 skb_frag_off_copy(fragto, fragfrom);
3732 skb_frag_size_set(fragto, todo);
3733
3734 skb_frag_off_add(fragfrom, todo);
3735 skb_frag_size_sub(fragfrom, todo);
3736 todo = 0;
3737
3738 to++;
3739 break;
3740 }
3741 }
3742
3743 /* Ready to "commit" this state change to tgt */
3744 skb_shinfo(tgt)->nr_frags = to;
3745
3746 if (merge >= 0) {
3747 fragfrom = &skb_shinfo(skb)->frags[0];
3748 fragto = &skb_shinfo(tgt)->frags[merge];
3749
3750 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3751 __skb_frag_unref(fragfrom, skb->pp_recycle);
3752 }
3753
3754 /* Reposition in the original skb */
3755 to = 0;
3756 while (from < skb_shinfo(skb)->nr_frags)
3757 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3758 skb_shinfo(skb)->nr_frags = to;
3759
3760 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3761
3762onlymerged:
3763	/* Most likely the tgt won't ever need its checksum anymore; the skb,
3764	 * on the other hand, might need it if it has to be resent.
3765	 */
3766 tgt->ip_summed = CHECKSUM_PARTIAL;
3767 skb->ip_summed = CHECKSUM_PARTIAL;
3768
3769 skb_len_add(skb, -shiftlen);
3770 skb_len_add(tgt, shiftlen);
3771
3772 return shiftlen;
3773}
3774
3775/**
3776 * skb_prepare_seq_read - Prepare a sequential read of skb data
3777 * @skb: the buffer to read
3778 * @from: lower offset of data to be read
3779 * @to: upper offset of data to be read
3780 * @st: state variable
3781 *
3782 * Initializes the specified state variable. Must be called before
3783 * invoking skb_seq_read() for the first time.
3784 */
3785void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3786 unsigned int to, struct skb_seq_state *st)
3787{
3788 st->lower_offset = from;
3789 st->upper_offset = to;
3790 st->root_skb = st->cur_skb = skb;
3791 st->frag_idx = st->stepped_offset = 0;
3792 st->frag_data = NULL;
3793 st->frag_off = 0;
3794}
3795EXPORT_SYMBOL(skb_prepare_seq_read);
3796
3797/**
3798 * skb_seq_read - Sequentially read skb data
3799 * @consumed: number of bytes consumed by the caller so far
3800 * @data: destination pointer for data to be returned
3801 * @st: state variable
3802 *
3803 * Reads a block of skb data at @consumed relative to the
3804 * lower offset specified to skb_prepare_seq_read(). Assigns
3805 * the head of the data block to @data and returns the length
3806 * of the block or 0 if the end of the skb data or the upper
3807 * offset has been reached.
3808 *
3809 * The caller is not required to consume all of the data
3810 * returned, i.e. @consumed is typically set to the number
3811 * of bytes already consumed and the next call to
3812 * skb_seq_read() will return the remaining part of the block.
3813 *
3814 * Note 1: The size of each block of data returned can be arbitrary;
3815 *	  this limitation is the cost of zerocopy sequential
3816 *	  reads of potentially non-linear data.
3817 *
3818 * Note 2: Fragment lists within fragments are not implemented
3819 * at the moment, state->root_skb could be replaced with
3820 * a stack for this purpose.
3821 */
3822unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3823 struct skb_seq_state *st)
3824{
3825 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3826 skb_frag_t *frag;
3827
3828 if (unlikely(abs_offset >= st->upper_offset)) {
3829 if (st->frag_data) {
3830 kunmap_atomic(st->frag_data);
3831 st->frag_data = NULL;
3832 }
3833 return 0;
3834 }
3835
3836next_skb:
3837 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3838
3839 if (abs_offset < block_limit && !st->frag_data) {
3840 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3841 return block_limit - abs_offset;
3842 }
3843
3844 if (st->frag_idx == 0 && !st->frag_data)
3845 st->stepped_offset += skb_headlen(st->cur_skb);
3846
3847 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3848 unsigned int pg_idx, pg_off, pg_sz;
3849
3850 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3851
3852 pg_idx = 0;
3853 pg_off = skb_frag_off(frag);
3854 pg_sz = skb_frag_size(frag);
3855
3856 if (skb_frag_must_loop(skb_frag_page(frag))) {
3857 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
3858 pg_off = offset_in_page(pg_off + st->frag_off);
3859 pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
3860 PAGE_SIZE - pg_off);
3861 }
3862
3863 block_limit = pg_sz + st->stepped_offset;
3864 if (abs_offset < block_limit) {
3865 if (!st->frag_data)
3866 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
3867
3868 *data = (u8 *)st->frag_data + pg_off +
3869 (abs_offset - st->stepped_offset);
3870
3871 return block_limit - abs_offset;
3872 }
3873
3874 if (st->frag_data) {
3875 kunmap_atomic(st->frag_data);
3876 st->frag_data = NULL;
3877 }
3878
3879 st->stepped_offset += pg_sz;
3880 st->frag_off += pg_sz;
3881 if (st->frag_off == skb_frag_size(frag)) {
3882 st->frag_off = 0;
3883 st->frag_idx++;
3884 }
3885 }
3886
3887 if (st->frag_data) {
3888 kunmap_atomic(st->frag_data);
3889 st->frag_data = NULL;
3890 }
3891
3892 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3893 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
3894 st->frag_idx = 0;
3895 goto next_skb;
3896 } else if (st->cur_skb->next) {
3897 st->cur_skb = st->cur_skb->next;
3898 st->frag_idx = 0;
3899 goto next_skb;
3900 }
3901
3902 return 0;
3903}
3904EXPORT_SYMBOL(skb_seq_read);
3905
3906/**
3907 * skb_abort_seq_read - Abort a sequential read of skb data
3908 * @st: state variable
3909 *
3910 * Must be called if the read is abandoned before skb_seq_read()
3911 * has returned 0.
3912 */
3913void skb_abort_seq_read(struct skb_seq_state *st)
3914{
3915 if (st->frag_data)
3916 kunmap_atomic(st->frag_data);
3917}
3918EXPORT_SYMBOL(skb_abort_seq_read);
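
/* Example (illustrative sketch) of the sequential-read protocol above;
 * "handle()" is assumed. skb_abort_seq_read() is needed only because the
 * loop may stop before skb_seq_read() has returned 0.
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		if (handle(data, len) < 0) {
 *			skb_abort_seq_read(&st);	// unmap pending frag
 *			break;
 *		}
 *		consumed += len;
 *	}
 */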
3919
3920#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
3921
3922static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3923 struct ts_config *conf,
3924 struct ts_state *state)
3925{
3926 return skb_seq_read(offset, text, TS_SKB_CB(state));
3927}
3928
3929static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3930{
3931 skb_abort_seq_read(TS_SKB_CB(state));
3932}
3933
3934/**
3935 * skb_find_text - Find a text pattern in skb data
3936 * @skb: the buffer to look in
3937 * @from: search offset
3938 * @to: search limit
3939 * @config: textsearch configuration
3940 *
3941 * Finds a pattern in the skb data according to the specified
3942 * textsearch configuration. Use textsearch_next() to retrieve
3943 * subsequent occurrences of the pattern. Returns the offset
3944 * to the first occurrence or UINT_MAX if no match was found.
3945 */
3946unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3947 unsigned int to, struct ts_config *config)
3948{
3949 struct ts_state state;
3950 unsigned int ret;
3951
3952 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));
3953
3954 config->get_next_block = skb_ts_get_next_block;
3955 config->finish = skb_ts_finish;
3956
3957 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
3958
3959 ret = textsearch_find(config, &state);
3960 return (ret <= to - from ? ret : UINT_MAX);
3961}
3962EXPORT_SYMBOL(skb_find_text);
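
/* Usage sketch for skb_find_text() (illustrative; assumes process context
 * for textsearch_prepare() and a caller that owns @skb):
 *
 *	struct ts_config *conf;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *	pos = skb_find_text(skb, 0, skb->len, conf);	// UINT_MAX if no match
 *	textsearch_destroy(conf);
 */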
3963
3964int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3965 int offset, size_t size)
3966{
3967 int i = skb_shinfo(skb)->nr_frags;
3968
3969 if (skb_can_coalesce(skb, i, page, offset)) {
3970 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3971 } else if (i < MAX_SKB_FRAGS) {
3972 skb_zcopy_downgrade_managed(skb);
3973 get_page(page);
3974 skb_fill_page_desc_noacc(skb, i, page, offset, size);
3975 } else {
3976 return -EMSGSIZE;
3977 }
3978
3979 return 0;
3980}
3981EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3982
3983/**
3984 * skb_pull_rcsum - pull skb and update receive checksum
3985 * @skb: buffer to update
3986 * @len: length of data pulled
3987 *
3988 * This function performs an skb_pull on the packet and updates
3989 * the CHECKSUM_COMPLETE checksum. It should be used on
3990 * receive path processing instead of skb_pull unless you know
3991 * that the checksum difference is zero (e.g., a valid IP header)
3992 * or you are setting ip_summed to CHECKSUM_NONE.
3993 */
3994void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3995{
3996 unsigned char *data = skb->data;
3997
3998 BUG_ON(len > skb->len);
3999 __skb_pull(skb, len);
4000 skb_postpull_rcsum(skb, data, len);
4001 return skb->data;
4002}
4003EXPORT_SYMBOL_GPL(skb_pull_rcsum);
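
/* Typical receive-path use of skb_pull_rcsum(): strip a fixed-size
 * encapsulation header while keeping a CHECKSUM_COMPLETE value valid.
 * Illustrative sketch; MYENCAP_HLEN is a hypothetical header length:
 *
 *	if (!pskb_may_pull(skb, MYENCAP_HLEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, MYENCAP_HLEN);
 *	skb_reset_network_header(skb);
 */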
4004
4005static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
4006{
4007 skb_frag_t head_frag;
4008 struct page *page;
4009
4010 page = virt_to_head_page(frag_skb->head);
4011 __skb_frag_set_page(&head_frag, page);
4012 skb_frag_off_set(&head_frag, frag_skb->data -
4013 (unsigned char *)page_address(page));
4014 skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
4015 return head_frag;
4016}
4017
4018struct sk_buff *skb_segment_list(struct sk_buff *skb,
4019 netdev_features_t features,
4020 unsigned int offset)
4021{
4022 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
4023 unsigned int tnl_hlen = skb_tnl_header_len(skb);
4024 unsigned int delta_truesize = 0;
4025 unsigned int delta_len = 0;
4026 struct sk_buff *tail = NULL;
4027 struct sk_buff *nskb, *tmp;
4028 int len_diff, err;
4029
4030 skb_push(skb, -skb_network_offset(skb) + offset);
4031
4032 skb_shinfo(skb)->frag_list = NULL;
4033
4034 do {
4035 nskb = list_skb;
4036 list_skb = list_skb->next;
4037
4038 err = 0;
4039 delta_truesize += nskb->truesize;
4040 if (skb_shared(nskb)) {
4041 tmp = skb_clone(nskb, GFP_ATOMIC);
4042 if (tmp) {
4043 consume_skb(nskb);
4044 nskb = tmp;
4045 err = skb_unclone(nskb, GFP_ATOMIC);
4046 } else {
4047 err = -ENOMEM;
4048 }
4049 }
4050
4051 if (!tail)
4052 skb->next = nskb;
4053 else
4054 tail->next = nskb;
4055
4056 if (unlikely(err)) {
4057 nskb->next = list_skb;
4058 goto err_linearize;
4059 }
4060
4061 tail = nskb;
4062
4063 delta_len += nskb->len;
4064
4065 skb_push(nskb, -skb_network_offset(nskb) + offset);
4066
4067 skb_release_head_state(nskb);
4068 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
4069 __copy_skb_header(nskb, skb);
4070
4071 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
4072 nskb->transport_header += len_diff;
4073 skb_copy_from_linear_data_offset(skb, -tnl_hlen,
4074 nskb->data - tnl_hlen,
4075 offset + tnl_hlen);
4076
4077 if (skb_needs_linearize(nskb, features) &&
4078 __skb_linearize(nskb))
4079 goto err_linearize;
4080
4081 } while (list_skb);
4082
4083 skb->truesize = skb->truesize - delta_truesize;
4084 skb->data_len = skb->data_len - delta_len;
4085 skb->len = skb->len - delta_len;
4086
4087 skb_gso_reset(skb);
4088
4089 skb->prev = tail;
4090
4091 if (skb_needs_linearize(skb, features) &&
4092 __skb_linearize(skb))
4093 goto err_linearize;
4094
4095 skb_get(skb);
4096
4097 return skb;
4098
4099err_linearize:
4100 kfree_skb_list(skb->next);
4101 skb->next = NULL;
4102 return ERR_PTR(-ENOMEM);
4103}
4104EXPORT_SYMBOL_GPL(skb_segment_list);
4105
4106/**
4107 * skb_segment - Perform protocol segmentation on skb.
4108 * @head_skb: buffer to segment
4109 * @features: features for the output path (see dev->features)
4110 *
4111 * This function performs segmentation on the given skb. It returns
4112 * a pointer to the first in a list of new skbs for the segments.
4113 * In case of error it returns ERR_PTR(err).
4114 */
4115struct sk_buff *skb_segment(struct sk_buff *head_skb,
4116 netdev_features_t features)
4117{
4118 struct sk_buff *segs = NULL;
4119 struct sk_buff *tail = NULL;
4120 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
4121 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
4122 unsigned int mss = skb_shinfo(head_skb)->gso_size;
4123 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
4124 struct sk_buff *frag_skb = head_skb;
4125 unsigned int offset = doffset;
4126 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
4127 unsigned int partial_segs = 0;
4128 unsigned int headroom;
4129 unsigned int len = head_skb->len;
4130 __be16 proto;
4131 bool csum, sg;
4132 int nfrags = skb_shinfo(head_skb)->nr_frags;
4133 int err = -ENOMEM;
4134 int i = 0;
4135 int pos;
4136
4137 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
4138 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
4139 struct sk_buff *check_skb;
4140
4141 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
4142 if (skb_headlen(check_skb) && !check_skb->head_frag) {
4143 /* gso_size is untrusted, and we have a frag_list with
4144 * a linear non head_frag item.
4145 *
4146 * If head_skb's headlen does not fit requested gso_size,
4147 * it means that the frag_list members do NOT terminate
4148 * on exact gso_size boundaries. Hence we cannot perform
				 * skb_frag_t page sharing. Therefore we must fall back to
4150 * copying the frag_list skbs; we do so by disabling SG.
4151 */
4152 features &= ~NETIF_F_SG;
4153 break;
4154 }
4155 }
4156 }
4157
4158 __skb_push(head_skb, doffset);
4159 proto = skb_network_protocol(head_skb, NULL);
4160 if (unlikely(!proto))
4161 return ERR_PTR(-EINVAL);
4162
4163 sg = !!(features & NETIF_F_SG);
4164 csum = !!can_checksum_protocol(features, proto);
4165
4166 if (sg && csum && (mss != GSO_BY_FRAGS)) {
4167 if (!(features & NETIF_F_GSO_PARTIAL)) {
4168 struct sk_buff *iter;
4169 unsigned int frag_len;
4170
4171 if (!list_skb ||
4172 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
4173 goto normal;
4174
4175 /* If we get here then all the required
4176 * GSO features except frag_list are supported.
			 * Try to split the SKB into multiple GSO SKBs
4178 * with no frag_list.
4179 * Currently we can do that only when the buffers don't
4180 * have a linear part and all the buffers except
4181 * the last are of the same length.
4182 */
4183 frag_len = list_skb->len;
4184 skb_walk_frags(head_skb, iter) {
4185 if (frag_len != iter->len && iter->next)
4186 goto normal;
4187 if (skb_headlen(iter) && !iter->head_frag)
4188 goto normal;
4189
4190 len -= iter->len;
4191 }
4192
4193 if (len != frag_len)
4194 goto normal;
4195 }
4196
4197 /* GSO partial only requires that we trim off any excess that
		 * doesn't fit into an MSS-sized block, so take care of that
4199 * now.
4200 */
4201 partial_segs = len / mss;
4202 if (partial_segs > 1)
4203 mss *= partial_segs;
4204 else
4205 partial_segs = 0;
4206 }
4207
4208normal:
4209 headroom = skb_headroom(head_skb);
4210 pos = skb_headlen(head_skb);
4211
4212 do {
4213 struct sk_buff *nskb;
4214 skb_frag_t *nskb_frag;
4215 int hsize;
4216 int size;
4217
4218 if (unlikely(mss == GSO_BY_FRAGS)) {
4219 len = list_skb->len;
4220 } else {
4221 len = head_skb->len - offset;
4222 if (len > mss)
4223 len = mss;
4224 }
4225
4226 hsize = skb_headlen(head_skb) - offset;
4227
4228 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) &&
4229 (skb_headlen(list_skb) == len || sg)) {
4230 BUG_ON(skb_headlen(list_skb) > len);
4231
4232 i = 0;
4233 nfrags = skb_shinfo(list_skb)->nr_frags;
4234 frag = skb_shinfo(list_skb)->frags;
4235 frag_skb = list_skb;
4236 pos += skb_headlen(list_skb);
4237
4238 while (pos < offset + len) {
4239 BUG_ON(i >= nfrags);
4240
4241 size = skb_frag_size(frag);
4242 if (pos + size > offset + len)
4243 break;
4244
4245 i++;
4246 pos += size;
4247 frag++;
4248 }
4249
4250 nskb = skb_clone(list_skb, GFP_ATOMIC);
4251 list_skb = list_skb->next;
4252
4253 if (unlikely(!nskb))
4254 goto err;
4255
4256 if (unlikely(pskb_trim(nskb, len))) {
4257 kfree_skb(nskb);
4258 goto err;
4259 }
4260
4261 hsize = skb_end_offset(nskb);
4262 if (skb_cow_head(nskb, doffset + headroom)) {
4263 kfree_skb(nskb);
4264 goto err;
4265 }
4266
4267 nskb->truesize += skb_end_offset(nskb) - hsize;
4268 skb_release_head_state(nskb);
4269 __skb_push(nskb, doffset);
4270 } else {
4271 if (hsize < 0)
4272 hsize = 0;
4273 if (hsize > len || !sg)
4274 hsize = len;
4275
4276 nskb = __alloc_skb(hsize + doffset + headroom,
4277 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
4278 NUMA_NO_NODE);
4279
4280 if (unlikely(!nskb))
4281 goto err;
4282
4283 skb_reserve(nskb, headroom);
4284 __skb_put(nskb, doffset);
4285 }
4286
4287 if (segs)
4288 tail->next = nskb;
4289 else
4290 segs = nskb;
4291 tail = nskb;
4292
4293 __copy_skb_header(nskb, head_skb);
4294
4295 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
4296 skb_reset_mac_len(nskb);
4297
4298 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
4299 nskb->data - tnl_hlen,
4300 doffset + tnl_hlen);
4301
4302 if (nskb->len == len + doffset)
4303 goto perform_csum_check;
4304
4305 if (!sg) {
4306 if (!csum) {
4307 if (!nskb->remcsum_offload)
4308 nskb->ip_summed = CHECKSUM_NONE;
4309 SKB_GSO_CB(nskb)->csum =
4310 skb_copy_and_csum_bits(head_skb, offset,
4311 skb_put(nskb,
4312 len),
4313 len);
4314 SKB_GSO_CB(nskb)->csum_start =
4315 skb_headroom(nskb) + doffset;
4316 } else {
4317 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
4318 goto err;
4319 }
4320 continue;
4321 }
4322
4323 nskb_frag = skb_shinfo(nskb)->frags;
4324
4325 skb_copy_from_linear_data_offset(head_skb, offset,
4326 skb_put(nskb, hsize), hsize);
4327
4328 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
4329 SKBFL_SHARED_FRAG;
4330
4331 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
4332 skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
4333 goto err;
4334
4335 while (pos < offset + len) {
4336 if (i >= nfrags) {
4337 i = 0;
4338 nfrags = skb_shinfo(list_skb)->nr_frags;
4339 frag = skb_shinfo(list_skb)->frags;
4340 frag_skb = list_skb;
4341 if (!skb_headlen(list_skb)) {
4342 BUG_ON(!nfrags);
4343 } else {
4344 BUG_ON(!list_skb->head_frag);
4345
4346 /* to make room for head_frag. */
4347 i--;
4348 frag--;
4349 }
4350 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
4351 skb_zerocopy_clone(nskb, frag_skb,
4352 GFP_ATOMIC))
4353 goto err;
4354
4355 list_skb = list_skb->next;
4356 }
4357
4358 if (unlikely(skb_shinfo(nskb)->nr_frags >=
4359 MAX_SKB_FRAGS)) {
4360 net_warn_ratelimited(
4361 "skb_segment: too many frags: %u %u\n",
4362 pos, mss);
4363 err = -EINVAL;
4364 goto err;
4365 }
4366
4367 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
4368 __skb_frag_ref(nskb_frag);
4369 size = skb_frag_size(nskb_frag);
4370
4371 if (pos < offset) {
4372 skb_frag_off_add(nskb_frag, offset - pos);
4373 skb_frag_size_sub(nskb_frag, offset - pos);
4374 }
4375
4376 skb_shinfo(nskb)->nr_frags++;
4377
4378 if (pos + size <= offset + len) {
4379 i++;
4380 frag++;
4381 pos += size;
4382 } else {
4383 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
4384 goto skip_fraglist;
4385 }
4386
4387 nskb_frag++;
4388 }
4389
4390skip_fraglist:
4391 nskb->data_len = len - hsize;
4392 nskb->len += nskb->data_len;
4393 nskb->truesize += nskb->data_len;
4394
4395perform_csum_check:
4396 if (!csum) {
4397 if (skb_has_shared_frag(nskb) &&
4398 __skb_linearize(nskb))
4399 goto err;
4400
4401 if (!nskb->remcsum_offload)
4402 nskb->ip_summed = CHECKSUM_NONE;
4403 SKB_GSO_CB(nskb)->csum =
4404 skb_checksum(nskb, doffset,
4405 nskb->len - doffset, 0);
4406 SKB_GSO_CB(nskb)->csum_start =
4407 skb_headroom(nskb) + doffset;
4408 }
4409 } while ((offset += len) < head_skb->len);
4410
4411 /* Some callers want to get the end of the list.
4412 * Put it in segs->prev to avoid walking the list.
4413 * (see validate_xmit_skb_list() for example)
4414 */
4415 segs->prev = tail;
4416
4417 if (partial_segs) {
4418 struct sk_buff *iter;
4419 int type = skb_shinfo(head_skb)->gso_type;
4420 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
4421
4422 /* Update type to add partial and then remove dodgy if set */
4423 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
4424 type &= ~SKB_GSO_DODGY;
4425
4426 /* Update GSO info and prepare to start updating headers on
4427 * our way back down the stack of protocols.
4428 */
4429 for (iter = segs; iter; iter = iter->next) {
4430 skb_shinfo(iter)->gso_size = gso_size;
4431 skb_shinfo(iter)->gso_segs = partial_segs;
4432 skb_shinfo(iter)->gso_type = type;
4433 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
4434 }
4435
4436 if (tail->len - doffset <= gso_size)
4437 skb_shinfo(tail)->gso_size = 0;
4438 else if (tail != segs)
4439 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
4440 }
4441
	/* The following permits correct backpressure for protocols
	 * using skb_set_owner_w().
	 * The idea is to transfer ownership from head_skb to the last segment.
	 */
4446 if (head_skb->destructor == sock_wfree) {
4447 swap(tail->truesize, head_skb->truesize);
4448 swap(tail->destructor, head_skb->destructor);
4449 swap(tail->sk, head_skb->sk);
4450 }
4451 return segs;
4452
4453err:
4454 kfree_skb_list(segs);
4455 return ERR_PTR(err);
4456}
4457EXPORT_SYMBOL_GPL(skb_segment);
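
/* Sketch of how a caller typically consumes skb_segment() (illustrative;
 * xmit_one() is a hypothetical transmit helper). The original skb is
 * released once the segment list fully describes its data:
 *
 *	struct sk_buff *segs, *seg, *next;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *	skb_list_walk_safe(segs, seg, next) {
 *		skb_mark_not_on_list(seg);
 *		xmit_one(seg);
 *	}
 */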
4458
4459#ifdef CONFIG_SKB_EXTENSIONS
4460#define SKB_EXT_ALIGN_VALUE 8
4461#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
4462
4463static const u8 skb_ext_type_len[] = {
4464#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4465 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4466#endif
4467#ifdef CONFIG_XFRM
4468 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
4469#endif
4470#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4471 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
4472#endif
4473#if IS_ENABLED(CONFIG_MPTCP)
4474 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
4475#endif
4476#if IS_ENABLED(CONFIG_MCTP_FLOWS)
4477 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
4478#endif
4479};
4480
4481static __always_inline unsigned int skb_ext_total_length(void)
4482{
4483 return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
4484#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4485 skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
4486#endif
4487#ifdef CONFIG_XFRM
4488 skb_ext_type_len[SKB_EXT_SEC_PATH] +
4489#endif
4490#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4491 skb_ext_type_len[TC_SKB_EXT] +
4492#endif
4493#if IS_ENABLED(CONFIG_MPTCP)
4494 skb_ext_type_len[SKB_EXT_MPTCP] +
4495#endif
4496#if IS_ENABLED(CONFIG_MCTP_FLOWS)
4497 skb_ext_type_len[SKB_EXT_MCTP] +
4498#endif
4499 0;
4500}
4501
4502static void skb_extensions_init(void)
4503{
4504 BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4505 BUILD_BUG_ON(skb_ext_total_length() > 255);
4506
4507 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4508 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4509 0,
4510 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4511 NULL);
4512}
4513#else
4514static void skb_extensions_init(void) {}
4515#endif
4516
4517void __init skb_init(void)
4518{
4519 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
4520 sizeof(struct sk_buff),
4521 0,
4522 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4523 offsetof(struct sk_buff, cb),
4524 sizeof_field(struct sk_buff, cb),
4525 NULL);
4526 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
4527 sizeof(struct sk_buff_fclones),
4528 0,
4529 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4530 NULL);
4531 skb_extensions_init();
4532}
4533
4534static int
4535__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4536 unsigned int recursion_level)
4537{
4538 int start = skb_headlen(skb);
4539 int i, copy = start - offset;
4540 struct sk_buff *frag_iter;
4541 int elt = 0;
4542
4543 if (unlikely(recursion_level >= 24))
4544 return -EMSGSIZE;
4545
4546 if (copy > 0) {
4547 if (copy > len)
4548 copy = len;
4549 sg_set_buf(sg, skb->data + offset, copy);
4550 elt++;
4551 if ((len -= copy) == 0)
4552 return elt;
4553 offset += copy;
4554 }
4555
4556 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4557 int end;
4558
4559 WARN_ON(start > offset + len);
4560
4561 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4562 if ((copy = end - offset) > 0) {
4563 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4564 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4565 return -EMSGSIZE;
4566
4567 if (copy > len)
4568 copy = len;
4569 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4570 skb_frag_off(frag) + offset - start);
4571 elt++;
4572 if (!(len -= copy))
4573 return elt;
4574 offset += copy;
4575 }
4576 start = end;
4577 }
4578
4579 skb_walk_frags(skb, frag_iter) {
4580 int end, ret;
4581
4582 WARN_ON(start > offset + len);
4583
4584 end = start + frag_iter->len;
4585 if ((copy = end - offset) > 0) {
4586 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4587 return -EMSGSIZE;
4588
4589 if (copy > len)
4590 copy = len;
4591 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
4592 copy, recursion_level + 1);
4593 if (unlikely(ret < 0))
4594 return ret;
4595 elt += ret;
4596 if ((len -= copy) == 0)
4597 return elt;
4598 offset += copy;
4599 }
4600 start = end;
4601 }
4602 BUG_ON(len);
4603 return elt;
4604}
4605
4606/**
4607 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4608 * @skb: Socket buffer containing the buffers to be mapped
4609 * @sg: The scatter-gather list to map into
4610 * @offset: The offset into the buffer's contents to start mapping
4611 * @len: Length of buffer space to be mapped
4612 *
4613 * Fill the specified scatter-gather list with mappings/pointers into a
4614 * region of the buffer space attached to a socket buffer. Returns either
4615 * the number of scatterlist items used, or -EMSGSIZE if the contents
4616 * could not fit.
4617 */
4618int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4619{
4620 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4621
4622 if (nsg <= 0)
4623 return nsg;
4624
4625 sg_mark_end(&sg[nsg - 1]);
4626
4627 return nsg;
4628}
4629EXPORT_SYMBOL_GPL(skb_to_sgvec);
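
/* Usage sketch for skb_to_sgvec() (illustrative; assumes the skb has no
 * frag_list so that MAX_SKB_FRAGS + 1 entries suffice):
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;
 *	// sg[0..nsg-1] now map the data; the last entry is marked as end
 */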
4630
/* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
 * given sglist, without marking the sg entry that contains the last skb data
 * as the end. The caller can therefore manipulate the sg list at will when
 * appending new data after the first call, without calling sg_unmark_end to
 * extend the sg list.
 *
 * Scenario to use skb_to_sgvec_nomark:
 * 1. sg_init_table
 * 2. skb_to_sgvec_nomark(payload1)
 * 3. skb_to_sgvec_nomark(payload2)
 *
 * This is equivalent to:
 * 1. sg_init_table
 * 2. skb_to_sgvec(payload1)
 * 3. sg_unmark_end
 * 4. skb_to_sgvec(payload2)
 *
 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
 * is preferable.
 */
4650int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4651 int offset, int len)
4652{
4653 return __skb_to_sgvec(skb, sg, offset, len, 0);
4654}
4655EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
4656
4657
4658
4659/**
4660 * skb_cow_data - Check that a socket buffer's data buffers are writable
4661 * @skb: The socket buffer to check.
4662 * @tailbits: Amount of trailing space to be added
4663 * @trailer: Returned pointer to the skb where the @tailbits space begins
4664 *
4665 * Make sure that the data buffers attached to a socket buffer are
4666 * writable. If they are not, private copies are made of the data buffers
4667 * and the socket buffer is set to use these instead.
4668 *
4669 * If @tailbits is given, make sure that there is space to write @tailbits
4670 * bytes of data beyond current end of socket buffer. @trailer will be
4671 * set to point to the skb in which this space begins.
4672 *
4673 * The number of scatterlist elements required to completely map the
4674 * COW'd and extended socket buffer will be returned.
4675 */
4676int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4677{
4678 int copyflag;
4679 int elt;
4680 struct sk_buff *skb1, **skb_p;
4681
4682 /* If skb is cloned or its head is paged, reallocate
4683 * head pulling out all the pages (pages are considered not writable
4684 * at the moment even if they are anonymous).
4685 */
4686 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4687 !__pskb_pull_tail(skb, __skb_pagelen(skb)))
4688 return -ENOMEM;
4689
	/* Easy case. Most packets will go this way. */
	if (!skb_has_frag_list(skb)) {
		/* A bit of trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. On a miss we reallocate and reserve even more
		 * space; 128 bytes is fair. */
4696
4697 if (skb_tailroom(skb) < tailbits &&
4698 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4699 return -ENOMEM;
4700
4701 /* Voila! */
4702 *trailer = skb;
4703 return 1;
4704 }
4705
	/* Misery. We are in trouble; time to mince the fragments... */
4707
4708 elt = 1;
4709 skb_p = &skb_shinfo(skb)->frag_list;
4710 copyflag = 0;
4711
4712 while ((skb1 = *skb_p) != NULL) {
4713 int ntail = 0;
4714
		/* The fragment was partially pulled by someone;
		 * this can happen on input. Copy it and everything
		 * after it. */
4718
4719 if (skb_shared(skb1))
4720 copyflag = 1;
4721
4722 /* If the skb is the last, worry about trailer. */
4723
4724 if (skb1->next == NULL && tailbits) {
4725 if (skb_shinfo(skb1)->nr_frags ||
4726 skb_has_frag_list(skb1) ||
4727 skb_tailroom(skb1) < tailbits)
4728 ntail = tailbits + 128;
4729 }
4730
4731 if (copyflag ||
4732 skb_cloned(skb1) ||
4733 ntail ||
4734 skb_shinfo(skb1)->nr_frags ||
4735 skb_has_frag_list(skb1)) {
4736 struct sk_buff *skb2;
4737
			/* No way around it: we have to copy. */
4739 if (ntail == 0)
4740 skb2 = skb_copy(skb1, GFP_ATOMIC);
4741 else
4742 skb2 = skb_copy_expand(skb1,
4743 skb_headroom(skb1),
4744 ntail,
4745 GFP_ATOMIC);
4746 if (unlikely(skb2 == NULL))
4747 return -ENOMEM;
4748
4749 if (skb1->sk)
4750 skb_set_owner_w(skb2, skb1->sk);
4751
			/* Still alive after all that?
			 * OK, link the new skb and drop the old one. */
4754
4755 skb2->next = skb1->next;
4756 *skb_p = skb2;
4757 kfree_skb(skb1);
4758 skb1 = skb2;
4759 }
4760 elt++;
4761 *trailer = skb1;
4762 skb_p = &skb1->next;
4763 }
4764
4765 return elt;
4766}
4767EXPORT_SYMBOL_GPL(skb_cow_data);
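
/* Sketch of the classic pairing of skb_cow_data() with skb_to_sgvec(),
 * as done by IPsec-style transforms (illustrative; error unwinding is
 * elided). The COW pass first guarantees writability and tailroom, and
 * its return value sizes the scatterlist:
 *
 *	struct sk_buff *trailer;
 *	struct scatterlist *sg;
 *	int nsg;
 *
 *	nsg = skb_cow_data(skb, tailbits, &trailer);
 *	if (nsg < 0)
 *		return nsg;
 *	sg = kmalloc_array(nsg, sizeof(*sg), GFP_ATOMIC);
 *	if (!sg)
 *		return -ENOMEM;
 *	sg_init_table(sg, nsg);
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 */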
4768
4769static void sock_rmem_free(struct sk_buff *skb)
4770{
4771 struct sock *sk = skb->sk;
4772
4773 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4774}
4775
4776static void skb_set_err_queue(struct sk_buff *skb)
4777{
4778 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
4779 * So, it is safe to (mis)use it to mark skbs on the error queue.
4780 */
4781 skb->pkt_type = PACKET_OUTGOING;
4782 BUILD_BUG_ON(PACKET_OUTGOING == 0);
4783}
4784
4785/*
 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
4787 */
4788int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4789{
4790 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4791 (unsigned int)READ_ONCE(sk->sk_rcvbuf))
4792 return -ENOMEM;
4793
4794 skb_orphan(skb);
4795 skb->sk = sk;
4796 skb->destructor = sock_rmem_free;
4797 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
4798 skb_set_err_queue(skb);
4799
4800 /* before exiting rcu section, make sure dst is refcounted */
4801 skb_dst_force(skb);
4802
4803 skb_queue_tail(&sk->sk_error_queue, skb);
4804 if (!sock_flag(sk, SOCK_DEAD))
4805 sk_error_report(sk);
4806 return 0;
4807}
4808EXPORT_SYMBOL(sock_queue_err_skb);
4809
4810static bool is_icmp_err_skb(const struct sk_buff *skb)
4811{
4812 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4813 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4814}
4815
4816struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4817{
4818 struct sk_buff_head *q = &sk->sk_error_queue;
4819 struct sk_buff *skb, *skb_next = NULL;
4820 bool icmp_next = false;
4821 unsigned long flags;
4822
4823 spin_lock_irqsave(&q->lock, flags);
4824 skb = __skb_dequeue(q);
4825 if (skb && (skb_next = skb_peek(q))) {
4826 icmp_next = is_icmp_err_skb(skb_next);
4827 if (icmp_next)
4828 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
4829 }
4830 spin_unlock_irqrestore(&q->lock, flags);
4831
4832 if (is_icmp_err_skb(skb) && !icmp_next)
4833 sk->sk_err = 0;
4834
4835 if (skb_next)
4836 sk_error_report(sk);
4837
4838 return skb;
4839}
4840EXPORT_SYMBOL(sock_dequeue_err_skb);
4841
4842/**
4843 * skb_clone_sk - create clone of skb, and take reference to socket
4844 * @skb: the skb to clone
4845 *
4846 * This function creates a clone of a buffer that holds a reference on
4847 * sk_refcnt. Buffers created via this function are meant to be
 * returned using sock_queue_err_skb, or freed via kfree_skb.
4849 *
4850 * When passing buffers allocated with this function to sock_queue_err_skb
4851 * it is necessary to wrap the call with sock_hold/sock_put in order to
4852 * prevent the socket from being released prior to being enqueued on
4853 * the sk_error_queue.
4854 */
4855struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4856{
4857 struct sock *sk = skb->sk;
4858 struct sk_buff *clone;
4859
4860 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4861 return NULL;
4862
4863 clone = skb_clone(skb, GFP_ATOMIC);
4864 if (!clone) {
4865 sock_put(sk);
4866 return NULL;
4867 }
4868
4869 clone->sk = sk;
4870 clone->destructor = sock_efree;
4871
4872 return clone;
4873}
4874EXPORT_SYMBOL(skb_clone_sk);
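
/* Sketch of the sock_hold()/sock_put() pairing described above
 * (illustrative). Queueing may drop the clone's own socket reference via
 * skb_orphan(), so the caller pins the socket across the call:
 *
 *	struct sk_buff *clone = skb_clone_sk(skb);
 *
 *	if (clone) {
 *		struct sock *sk = clone->sk;
 *
 *		sock_hold(sk);
 *		if (sock_queue_err_skb(sk, clone))
 *			kfree_skb(clone);
 *		sock_put(sk);
 *	}
 */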
4875
4876static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4877 struct sock *sk,
4878 int tstype,
4879 bool opt_stats)
4880{
4881 struct sock_exterr_skb *serr;
4882 int err;
4883
4884 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4885
4886 serr = SKB_EXT_ERR(skb);
4887 memset(serr, 0, sizeof(*serr));
4888 serr->ee.ee_errno = ENOMSG;
4889 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4890 serr->ee.ee_info = tstype;
4891 serr->opt_stats = opt_stats;
4892 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
4893 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
4894 serr->ee.ee_data = skb_shinfo(skb)->tskey;
4895 if (sk_is_tcp(sk))
4896 serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
4897 }
4898
4899 err = sock_queue_err_skb(sk, skb);
4900
4901 if (err)
4902 kfree_skb(skb);
4903}
4904
4905static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4906{
4907 bool ret;
4908
4909 if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
4910 return true;
4911
4912 read_lock_bh(&sk->sk_callback_lock);
4913 ret = sk->sk_socket && sk->sk_socket->file &&
4914 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4915 read_unlock_bh(&sk->sk_callback_lock);
4916 return ret;
4917}
4918
4919void skb_complete_tx_timestamp(struct sk_buff *skb,
4920 struct skb_shared_hwtstamps *hwtstamps)
4921{
4922 struct sock *sk = skb->sk;
4923
4924 if (!skb_may_tx_timestamp(sk, false))
4925 goto err;
4926
4927 /* Take a reference to prevent skb_orphan() from freeing the socket,
4928 * but only if the socket refcount is not zero.
4929 */
4930 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4931 *skb_hwtstamps(skb) = *hwtstamps;
4932 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4933 sock_put(sk);
4934 return;
4935 }
4936
4937err:
4938 kfree_skb(skb);
4939}
4940EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4941
4942void __skb_tstamp_tx(struct sk_buff *orig_skb,
4943 const struct sk_buff *ack_skb,
4944 struct skb_shared_hwtstamps *hwtstamps,
4945 struct sock *sk, int tstype)
4946{
4947 struct sk_buff *skb;
4948 bool tsonly, opt_stats = false;
4949
4950 if (!sk)
4951 return;
4952
4953 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4954 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4955 return;
4956
4957 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4958 if (!skb_may_tx_timestamp(sk, tsonly))
4959 return;
4960
4961 if (tsonly) {
4962#ifdef CONFIG_INET
4963 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4964 sk_is_tcp(sk)) {
4965 skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
4966 ack_skb);
4967 opt_stats = true;
4968 } else
4969#endif
4970 skb = alloc_skb(0, GFP_ATOMIC);
4971 } else {
4972 skb = skb_clone(orig_skb, GFP_ATOMIC);
4973 }
4974 if (!skb)
4975 return;
4976
4977 if (tsonly) {
4978 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4979 SKBTX_ANY_TSTAMP;
4980 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4981 }
4982
4983 if (hwtstamps)
4984 *skb_hwtstamps(skb) = *hwtstamps;
4985 else
4986 __net_timestamp(skb);
4987
4988 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
4989}
4990EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4991
4992void skb_tstamp_tx(struct sk_buff *orig_skb,
4993 struct skb_shared_hwtstamps *hwtstamps)
4994{
4995 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk,
4996 SCM_TSTAMP_SND);
4997}
4998EXPORT_SYMBOL_GPL(skb_tstamp_tx);
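
/* Usage sketch (illustrative): a driver completing a hardware-timestamped
 * transmit fills struct skb_shared_hwtstamps and reports it, assuming it
 * has the timestamp as nanoseconds in @ns:
 *
 *	struct skb_shared_hwtstamps hwts = { .hwtstamp = ns_to_ktime(ns) };
 *
 *	skb_tstamp_tx(skb, &hwts);
 */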
4999
5000void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
5001{
5002 struct sock *sk = skb->sk;
5003 struct sock_exterr_skb *serr;
5004 int err = 1;
5005
5006 skb->wifi_acked_valid = 1;
5007 skb->wifi_acked = acked;
5008
5009 serr = SKB_EXT_ERR(skb);
5010 memset(serr, 0, sizeof(*serr));
5011 serr->ee.ee_errno = ENOMSG;
5012 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
5013
5014 /* Take a reference to prevent skb_orphan() from freeing the socket,
5015 * but only if the socket refcount is not zero.
5016 */
5017 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
5018 err = sock_queue_err_skb(sk, skb);
5019 sock_put(sk);
5020 }
5021 if (err)
5022 kfree_skb(skb);
5023}
5024EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
5025
5026/**
5027 * skb_partial_csum_set - set up and verify partial csum values for packet
5028 * @skb: the skb to set
5029 * @start: the number of bytes after skb->data to start checksumming.
5030 * @off: the offset from start to place the checksum.
5031 *
5032 * For untrusted partially-checksummed packets, we need to make sure the values
5033 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5034 *
5035 * This function checks and sets those values and skb->ip_summed: if this
5036 * returns false you should drop the packet.
5037 */
5038bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
5039{
5040 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
5041 u32 csum_start = skb_headroom(skb) + (u32)start;
5042
5043 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
5044 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
5045 start, off, skb_headroom(skb), skb_headlen(skb));
5046 return false;
5047 }
5048 skb->ip_summed = CHECKSUM_PARTIAL;
5049 skb->csum_start = csum_start;
5050 skb->csum_offset = off;
5051 skb_set_transport_header(skb, start);
5052 return true;
5053}
5054EXPORT_SYMBOL_GPL(skb_partial_csum_set);
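
/* Sketch of how a virtio-style driver validates untrusted checksum
 * metadata with skb_partial_csum_set() (illustrative; csum_start and
 * csum_offset come from an untrusted descriptor):
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */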
5055
5056static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
5057 unsigned int max)
5058{
5059 if (skb_headlen(skb) >= len)
5060 return 0;
5061
	/* If we need to pull up, then pull up to the max so we
	 * won't need to do it again.
	 */
5065 if (max > skb->len)
5066 max = skb->len;
5067
5068 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
5069 return -ENOMEM;
5070
5071 if (skb_headlen(skb) < len)
5072 return -EPROTO;
5073
5074 return 0;
5075}
5076
5077#define MAX_TCP_HDR_LEN (15 * 4)
5078
5079static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
5080 typeof(IPPROTO_IP) proto,
5081 unsigned int off)
5082{
5083 int err;
5084
5085 switch (proto) {
5086 case IPPROTO_TCP:
5087 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
5088 off + MAX_TCP_HDR_LEN);
5089 if (!err && !skb_partial_csum_set(skb, off,
5090 offsetof(struct tcphdr,
5091 check)))
5092 err = -EPROTO;
5093 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
5094
5095 case IPPROTO_UDP:
5096 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
5097 off + sizeof(struct udphdr));
5098 if (!err && !skb_partial_csum_set(skb, off,
5099 offsetof(struct udphdr,
5100 check)))
5101 err = -EPROTO;
5102 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
5103 }
5104
5105 return ERR_PTR(-EPROTO);
5106}
5107
5108/* This value should be large enough to cover a tagged ethernet header plus
5109 * maximally sized IP and TCP or UDP headers.
5110 */
5111#define MAX_IP_HDR_LEN 128
5112
5113static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
5114{
5115 unsigned int off;
5116 bool fragment;
5117 __sum16 *csum;
5118 int err;
5119
5120 fragment = false;
5121
5122 err = skb_maybe_pull_tail(skb,
5123 sizeof(struct iphdr),
5124 MAX_IP_HDR_LEN);
5125 if (err < 0)
5126 goto out;
5127
5128 if (ip_is_fragment(ip_hdr(skb)))
5129 fragment = true;
5130
5131 off = ip_hdrlen(skb);
5132
5133 err = -EPROTO;
5134
5135 if (fragment)
5136 goto out;
5137
5138 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
5139 if (IS_ERR(csum))
5140 return PTR_ERR(csum);
5141
5142 if (recalculate)
5143 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
5144 ip_hdr(skb)->daddr,
5145 skb->len - off,
5146 ip_hdr(skb)->protocol, 0);
5147 err = 0;
5148
5149out:
5150 return err;
5151}
5152
5153/* This value should be large enough to cover a tagged ethernet header plus
5154 * an IPv6 header, all options, and a maximal TCP or UDP header.
5155 */
5156#define MAX_IPV6_HDR_LEN 256
5157
5158#define OPT_HDR(type, skb, off) \
5159 (type *)(skb_network_header(skb) + (off))
5160
5161static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
5162{
5163 int err;
5164 u8 nexthdr;
5165 unsigned int off;
5166 unsigned int len;
5167 bool fragment;
5168 bool done;
5169 __sum16 *csum;
5170
5171 fragment = false;
5172 done = false;
5173
5174 off = sizeof(struct ipv6hdr);
5175
5176 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
5177 if (err < 0)
5178 goto out;
5179
5180 nexthdr = ipv6_hdr(skb)->nexthdr;
5181
5182 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
5183 while (off <= len && !done) {
5184 switch (nexthdr) {
5185 case IPPROTO_DSTOPTS:
5186 case IPPROTO_HOPOPTS:
5187 case IPPROTO_ROUTING: {
5188 struct ipv6_opt_hdr *hp;
5189
5190 err = skb_maybe_pull_tail(skb,
5191 off +
5192 sizeof(struct ipv6_opt_hdr),
5193 MAX_IPV6_HDR_LEN);
5194 if (err < 0)
5195 goto out;
5196
5197 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
5198 nexthdr = hp->nexthdr;
5199 off += ipv6_optlen(hp);
5200 break;
5201 }
5202 case IPPROTO_AH: {
5203 struct ip_auth_hdr *hp;
5204
5205 err = skb_maybe_pull_tail(skb,
5206 off +
5207 sizeof(struct ip_auth_hdr),
5208 MAX_IPV6_HDR_LEN);
5209 if (err < 0)
5210 goto out;
5211
5212 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
5213 nexthdr = hp->nexthdr;
5214 off += ipv6_authlen(hp);
5215 break;
5216 }
5217 case IPPROTO_FRAGMENT: {
5218 struct frag_hdr *hp;
5219
5220 err = skb_maybe_pull_tail(skb,
5221 off +
5222 sizeof(struct frag_hdr),
5223 MAX_IPV6_HDR_LEN);
5224 if (err < 0)
5225 goto out;
5226
5227 hp = OPT_HDR(struct frag_hdr, skb, off);
5228
5229 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
5230 fragment = true;
5231
5232 nexthdr = hp->nexthdr;
5233 off += sizeof(struct frag_hdr);
5234 break;
5235 }
5236 default:
5237 done = true;
5238 break;
5239 }
5240 }
5241
5242 err = -EPROTO;
5243
5244 if (!done || fragment)
5245 goto out;
5246
5247 csum = skb_checksum_setup_ip(skb, nexthdr, off);
5248 if (IS_ERR(csum))
5249 return PTR_ERR(csum);
5250
5251 if (recalculate)
5252 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5253 &ipv6_hdr(skb)->daddr,
5254 skb->len - off, nexthdr, 0);
5255 err = 0;
5256
5257out:
5258 return err;
5259}
5260
5261/**
5262 * skb_checksum_setup - set up partial checksum offset
5263 * @skb: the skb to set up
5264 * @recalculate: if true the pseudo-header checksum will be recalculated
5265 */
5266int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
5267{
5268 int err;
5269
5270 switch (skb->protocol) {
5271 case htons(ETH_P_IP):
5272 err = skb_checksum_setup_ipv4(skb, recalculate);
5273 break;
5274
5275 case htons(ETH_P_IPV6):
5276 err = skb_checksum_setup_ipv6(skb, recalculate);
5277 break;
5278
5279 default:
5280 err = -EPROTO;
5281 break;
5282 }
5283
5284 return err;
5285}
5286EXPORT_SYMBOL(skb_checksum_setup);
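
/* Usage sketch (illustrative): a backend that receives CHECKSUM_PARTIAL
 * packets from an untrusted peer re-derives the checksum fields before
 * passing them on:
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 *		err = skb_checksum_setup(skb, true);
 *		if (err)
 *			goto drop;
 *	}
 */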
5287
5288/**
5289 * skb_checksum_maybe_trim - maybe trims the given skb
5290 * @skb: the skb to check
5291 * @transport_len: the data length beyond the network header
5292 *
5293 * Checks whether the given skb has data beyond the given transport length.
5294 * If so, returns a cloned skb trimmed to this transport length.
5295 * Otherwise returns the provided skb. Returns NULL in error cases
5296 * (e.g. transport_len exceeds skb length or out-of-memory).
5297 *
5298 * Caller needs to set the skb transport header and free any returned skb if it
5299 * differs from the provided skb.
5300 */
5301static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
5302 unsigned int transport_len)
5303{
5304 struct sk_buff *skb_chk;
5305 unsigned int len = skb_transport_offset(skb) + transport_len;
5306 int ret;
5307
5308 if (skb->len < len)
5309 return NULL;
5310 else if (skb->len == len)
5311 return skb;
5312
5313 skb_chk = skb_clone(skb, GFP_ATOMIC);
5314 if (!skb_chk)
5315 return NULL;
5316
5317 ret = pskb_trim_rcsum(skb_chk, len);
5318 if (ret) {
5319 kfree_skb(skb_chk);
5320 return NULL;
5321 }
5322
5323 return skb_chk;
5324}
5325
5326/**
5327 * skb_checksum_trimmed - validate checksum of an skb
5328 * @skb: the skb to check
5329 * @transport_len: the data length beyond the network header
5330 * @skb_chkf: checksum function to use
5331 *
5332 * Applies the given checksum function skb_chkf to the provided skb.
5333 * Returns a checked and maybe trimmed skb. Returns NULL on error.
5334 *
5335 * If the skb has data beyond the given transport length, then a
5336 * trimmed & cloned skb is checked and returned.
5337 *
5338 * Caller needs to set the skb transport header and free any returned skb if it
5339 * differs from the provided skb.
5340 */
5341struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
5342 unsigned int transport_len,
5343 __sum16(*skb_chkf)(struct sk_buff *skb))
5344{
5345 struct sk_buff *skb_chk;
5346 unsigned int offset = skb_transport_offset(skb);
5347 __sum16 ret;
5348
5349 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
5350 if (!skb_chk)
5351 goto err;
5352
5353 if (!pskb_may_pull(skb_chk, offset))
5354 goto err;
5355
5356 skb_pull_rcsum(skb_chk, offset);
5357 ret = skb_chkf(skb_chk);
5358 skb_push_rcsum(skb_chk, offset);
5359
5360 if (ret)
5361 goto err;
5362
5363 return skb_chk;
5364
5365err:
5366 if (skb_chk && skb_chk != skb)
5367 kfree_skb(skb_chk);
5368
5369 return NULL;
5370
5371}
5372EXPORT_SYMBOL(skb_checksum_trimmed);
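
/* Sketch of skb_checksum_trimmed() on an IGMP/MLD-style input path
 * (illustrative; my_chkf is a hypothetical __sum16 (*)(struct sk_buff *)
 * checksum validator):
 *
 *	struct sk_buff *skb_chk;
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_chkf);
 *	if (!skb_chk)
 *		goto drop;
 *	// ... process skb_chk ...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);
 */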
5373
5374void __skb_warn_lro_forwarding(const struct sk_buff *skb)
5375{
5376 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5377 skb->dev->name);
5378}
5379EXPORT_SYMBOL(__skb_warn_lro_forwarding);
5380
5381void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5382{
5383 if (head_stolen) {
5384 skb_release_head_state(skb);
5385 kmem_cache_free(skbuff_head_cache, skb);
5386 } else {
5387 __kfree_skb(skb);
5388 }
5389}
5390EXPORT_SYMBOL(kfree_skb_partial);
5391
5392/**
5393 * skb_try_coalesce - try to merge skb to prior one
5394 * @to: prior buffer
5395 * @from: buffer to add
5396 * @fragstolen: pointer to boolean
5397 * @delta_truesize: how much more was allocated than was requested
5398 */
5399bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
5400 bool *fragstolen, int *delta_truesize)
5401{
5402 struct skb_shared_info *to_shinfo, *from_shinfo;
5403 int i, delta, len = from->len;
5404
5405 *fragstolen = false;
5406
5407 if (skb_cloned(to))
5408 return false;
5409
5410 /* In general, avoid mixing slab allocated and page_pool allocated
5411 * pages within the same SKB. However when @to is not pp_recycle and
5412 * @from is cloned, we can transition frag pages from page_pool to
5413 * reference counted.
5414 *
5415 * On the other hand, don't allow coalescing two pp_recycle SKBs if
5416 * @from is cloned, in case the SKB is using page_pool fragment
5417 * references (PP_FLAG_PAGE_FRAG). Since we only take full page
5418 * references for cloned SKBs at the moment that would result in
5419 * inconsistent reference counts.
5420 */
5421 if (to->pp_recycle != (from->pp_recycle && !skb_cloned(from)))
5422 return false;
5423
5424 if (len <= skb_tailroom(to)) {
5425 if (len)
5426 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
5427 *delta_truesize = 0;
5428 return true;
5429 }
5430
5431 to_shinfo = skb_shinfo(to);
5432 from_shinfo = skb_shinfo(from);
5433 if (to_shinfo->frag_list || from_shinfo->frag_list)
5434 return false;
5435 if (skb_zcopy(to) || skb_zcopy(from))
5436 return false;
5437
5438 if (skb_headlen(from) != 0) {
5439 struct page *page;
5440 unsigned int offset;
5441
5442 if (to_shinfo->nr_frags +
5443 from_shinfo->nr_frags >= MAX_SKB_FRAGS)
5444 return false;
5445
5446 if (skb_head_is_locked(from))
5447 return false;
5448
5449 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5450
5451 page = virt_to_head_page(from->head);
5452 offset = from->data - (unsigned char *)page_address(page);
5453
5454 skb_fill_page_desc(to, to_shinfo->nr_frags,
5455 page, offset, skb_headlen(from));
5456 *fragstolen = true;
5457 } else {
5458 if (to_shinfo->nr_frags +
5459 from_shinfo->nr_frags > MAX_SKB_FRAGS)
5460 return false;
5461
5462 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5463 }
5464
5465 WARN_ON_ONCE(delta < len);
5466
5467 memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5468 from_shinfo->frags,
5469 from_shinfo->nr_frags * sizeof(skb_frag_t));
5470 to_shinfo->nr_frags += from_shinfo->nr_frags;
5471
5472 if (!skb_cloned(from))
5473 from_shinfo->nr_frags = 0;
5474
5475 /* if the skb is not cloned this does nothing
5476 * since we set nr_frags to 0.
5477 */
5478 for (i = 0; i < from_shinfo->nr_frags; i++)
5479 __skb_frag_ref(&from_shinfo->frags[i]);
5480
5481 to->truesize += delta;
5482 to->len += len;
5483 to->data_len += len;
5484
5485 *delta_truesize = delta;
5486 return true;
5487}
5488EXPORT_SYMBOL(skb_try_coalesce);
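
/* Sketch of a receive-queue coalescing attempt with skb_try_coalesce()
 * (illustrative; mirrors the pattern of charging only the truesize delta
 * when the merge succeeds):
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (tail && skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		kfree_skb_partial(skb, fragstolen);
 *		sk_mem_charge(sk, delta);
 *	} else {
 *		__skb_queue_tail(queue, skb);
 *	}
 */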
5489
5490/**
5491 * skb_scrub_packet - scrub an skb
5492 *
5493 * @skb: buffer to clean
5494 * @xnet: packet is crossing netns
5495 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
5499 * skb_scrub_packet can also be used to clean a skb before injecting it in
5500 * another namespace (@xnet == true). We have to clear all information in the
5501 * skb that could impact namespace isolation.
5502 */
5503void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5504{
5505 skb->pkt_type = PACKET_HOST;
5506 skb->skb_iif = 0;
5507 skb->ignore_df = 0;
5508 skb_dst_drop(skb);
5509 skb_ext_reset(skb);
5510 nf_reset_ct(skb);
5511 nf_reset_trace(skb);
5512
5513#ifdef CONFIG_NET_SWITCHDEV
5514 skb->offload_fwd_mark = 0;
5515 skb->offload_l3_fwd_mark = 0;
5516#endif
5517
5518 if (!xnet)
5519 return;
5520
5521 ipvs_reset(skb);
5522 skb->mark = 0;
5523 skb_clear_tstamp(skb);
5524}
5525EXPORT_SYMBOL_GPL(skb_scrub_packet);
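
/* Usage sketch (illustrative): a tunnel delivering a decapsulated packet
 * scrubs it, clearing namespace-sensitive state only when the packet
 * actually crosses namespaces:
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(rx_dev), dev_net(tx_dev)));
 */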
5526
5527/**
5528 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
5529 *
5530 * @skb: GSO skb
5531 *
5532 * skb_gso_transport_seglen is used to determine the real size of the
5533 * individual segments, including Layer4 headers (TCP/UDP).
5534 *
5535 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
5536 */
5537static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
5538{
5539 const struct skb_shared_info *shinfo = skb_shinfo(skb);
5540 unsigned int thlen = 0;
5541
5542 if (skb->encapsulation) {
5543 thlen = skb_inner_transport_header(skb) -
5544 skb_transport_header(skb);
5545
5546 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
5547 thlen += inner_tcp_hdrlen(skb);
5548 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
5549 thlen = tcp_hdrlen(skb);
5550 } else if (unlikely(skb_is_gso_sctp(skb))) {
5551 thlen = sizeof(struct sctphdr);
5552 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
5553 thlen = sizeof(struct udphdr);
5554 }
5555 /* UFO sets gso_size to the size of the fragmentation
5556 * payload, i.e. the size of the L4 (UDP) header is already
5557 * accounted for.
5558 */
5559 return thlen + shinfo->gso_size;
5560}
5561
5562/**
5563 * skb_gso_network_seglen - Return length of individual segments of a gso packet
5564 *
5565 * @skb: GSO skb
5566 *
5567 * skb_gso_network_seglen is used to determine the real size of the
5568 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
5569 *
5570 * The MAC/L2 header is not accounted for.
5571 */
5572static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
5573{
5574 unsigned int hdr_len = skb_transport_header(skb) -
5575 skb_network_header(skb);
5576
5577 return hdr_len + skb_gso_transport_seglen(skb);
5578}
5579
5580/**
5581 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
5582 *
5583 * @skb: GSO skb
5584 *
5585 * skb_gso_mac_seglen is used to determine the real size of the
5586 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
5587 * headers (TCP/UDP).
5588 */
5589static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
5590{
5591 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
5592
5593 return hdr_len + skb_gso_transport_seglen(skb);
5594}
5595
5596/**
5597 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
5598 *
5599 * There are a couple of instances where we have a GSO skb, and we
5600 * want to determine what size it would be after it is segmented.
5601 *
5602 * We might want to check:
5603 * - L3+L4+payload size (e.g. IP forwarding)
5604 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
5605 *
5606 * This is a helper to do that correctly considering GSO_BY_FRAGS.
5607 *
5608 * @skb: GSO skb
5609 *
5610 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
5611 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
5612 *
5613 * @max_len: The maximum permissible length.
5614 *
5615 * Returns true if the segmented length <= max length.
5616 */
5617static inline bool skb_gso_size_check(const struct sk_buff *skb,
5618 unsigned int seg_len,
5619 unsigned int max_len) {
5620 const struct skb_shared_info *shinfo = skb_shinfo(skb);
5621 const struct sk_buff *iter;
5622
5623 if (shinfo->gso_size != GSO_BY_FRAGS)
5624 return seg_len <= max_len;
5625
5626 /* Undo this so we can re-use header sizes */
5627 seg_len -= GSO_BY_FRAGS;
5628
5629 skb_walk_frags(skb, iter) {
5630 if (seg_len + skb_headlen(iter) > max_len)
5631 return false;
5632 }
5633
5634 return true;
5635}
5636
5637/**
5638 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5639 *
5640 * @skb: GSO skb
5641 * @mtu: MTU to validate against
5642 *
5643 * skb_gso_validate_network_len validates if a given skb will fit a
5644 * wanted MTU once split. It considers L3 headers, L4 headers, and the
5645 * payload.
5646 */
5647bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5648{
5649 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5650}
5651EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
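
/* Usage sketch (illustrative; the IP forwarding path performs an
 * equivalent check before deciding whether a GSO skb fits the route MTU):
 *
 *	if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
 *		goto send_frag_needed;
 */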
5652
5653/**
5654 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
5655 *
5656 * @skb: GSO skb
5657 * @len: length to validate against
5658 *
5659 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
5660 * length once split, including L2, L3 and L4 headers and the payload.
5661 */
5662bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5663{
5664 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5665}
5666EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
5667
5668static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5669{
5670 int mac_len, meta_len;
5671 void *meta;
5672
5673 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5674 kfree_skb(skb);
5675 return NULL;
5676 }
5677
5678 mac_len = skb->data - skb_mac_header(skb);
5679 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5680 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5681 mac_len - VLAN_HLEN - ETH_TLEN);
5682 }
5683
5684 meta_len = skb_metadata_len(skb);
5685 if (meta_len) {
5686 meta = skb_metadata_end(skb) - meta_len;
5687 memmove(meta + VLAN_HLEN, meta, meta_len);
5688 }
5689
5690 skb->mac_header += VLAN_HLEN;
5691 return skb;
5692}
5693
5694struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5695{
5696 struct vlan_hdr *vhdr;
5697 u16 vlan_tci;
5698
5699 if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
5701 return skb;
5702 }
5703
5704 skb = skb_share_check(skb, GFP_ATOMIC);
5705 if (unlikely(!skb))
5706 goto err_free;
5707 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
5708 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
5709 goto err_free;
5710
5711 vhdr = (struct vlan_hdr *)skb->data;
5712 vlan_tci = ntohs(vhdr->h_vlan_TCI);
5713 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5714
5715 skb_pull_rcsum(skb, VLAN_HLEN);
5716 vlan_set_encap_proto(skb, vhdr);
5717
5718 skb = skb_reorder_vlan_header(skb);
5719 if (unlikely(!skb))
5720 goto err_free;
5721
5722 skb_reset_network_header(skb);
5723 if (!skb_transport_header_was_set(skb))
5724 skb_reset_transport_header(skb);
5725 skb_reset_mac_len(skb);
5726
5727 return skb;
5728
5729err_free:
5730 kfree_skb(skb);
5731 return NULL;
5732}
5733EXPORT_SYMBOL(skb_vlan_untag);
5734
5735int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
5736{
5737 if (!pskb_may_pull(skb, write_len))
5738 return -ENOMEM;
5739
5740 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5741 return 0;
5742
5743 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5744}
5745EXPORT_SYMBOL(skb_ensure_writable);
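
/* Usage sketch (illustrative): make the headers private before an
 * in-place rewrite; new_dst is a hypothetical replacement address:
 *
 *	err = skb_ensure_writable(skb, ETH_HLEN);
 *	if (err)
 *		return err;
 *	ether_addr_copy(eth_hdr(skb)->h_dest, new_dst);
 */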
5746
/* Remove the VLAN header from the packet and update the csum accordingly.
 * Expects an skb without skb_vlan_tag_present, but with a VLAN tag in the
 * payload.
 */
5750int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5751{
5752 struct vlan_hdr *vhdr;
5753 int offset = skb->data - skb_mac_header(skb);
5754 int err;
5755
5756 if (WARN_ONCE(offset,
5757 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5758 offset)) {
5759 return -EINVAL;
5760 }
5761
5762 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5763 if (unlikely(err))
5764 return err;
5765
5766 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5767
5768 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5769 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
5770
5771 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5772 __skb_pull(skb, VLAN_HLEN);
5773
5774 vlan_set_encap_proto(skb, vhdr);
5775 skb->mac_header += VLAN_HLEN;
5776
5777 if (skb_network_offset(skb) < ETH_HLEN)
5778 skb_set_network_header(skb, ETH_HLEN);
5779
5780 skb_reset_mac_len(skb);
5781
5782 return err;
5783}
5784EXPORT_SYMBOL(__skb_vlan_pop);
5785
5786/* Pop a vlan tag either from hwaccel or from payload.
5787 * Expects skb->data at mac header.
5788 */
5789int skb_vlan_pop(struct sk_buff *skb)
5790{
5791 u16 vlan_tci;
5792 __be16 vlan_proto;
5793 int err;
5794
5795 if (likely(skb_vlan_tag_present(skb))) {
5796 __vlan_hwaccel_clear_tag(skb);
5797 } else {
5798 if (unlikely(!eth_type_vlan(skb->protocol)))
5799 return 0;
5800
5801 err = __skb_vlan_pop(skb, &vlan_tci);
5802 if (err)
5803 return err;
5804 }
5805 /* move next vlan tag to hw accel tag */
5806 if (likely(!eth_type_vlan(skb->protocol)))
5807 return 0;
5808
5809 vlan_proto = skb->protocol;
5810 err = __skb_vlan_pop(skb, &vlan_tci);
5811 if (unlikely(err))
5812 return err;
5813
5814 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5815 return 0;
5816}
5817EXPORT_SYMBOL(skb_vlan_pop);
5818
5819/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5820 * Expects skb->data at mac header.
5821 */
5822int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5823{
5824 if (skb_vlan_tag_present(skb)) {
5825 int offset = skb->data - skb_mac_header(skb);
5826 int err;
5827
5828 if (WARN_ONCE(offset,
5829 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5830 offset)) {
5831 return -EINVAL;
5832 }
5833
5834 err = __vlan_insert_tag(skb, skb->vlan_proto,
5835 skb_vlan_tag_get(skb));
5836 if (err)
5837 return err;
5838
5839 skb->protocol = skb->vlan_proto;
5840 skb->mac_len += VLAN_HLEN;
5841
5842 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5843 }
5844 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5845 return 0;
5846}
5847EXPORT_SYMBOL(skb_vlan_push);
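
/* Sketch pairing skb_vlan_pop() with skb_vlan_push() to rewrite the
 * outermost tag (illustrative; skb->data must be at the mac header and
 * new_tci is a hypothetical replacement tag):
 *
 *	err = skb_vlan_pop(skb);
 *	if (!err)
 *		err = skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
 */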
5848
5849/**
5850 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
5851 *
5852 * @skb: Socket buffer to modify
5853 *
5854 * Drop the Ethernet header of @skb.
5855 *
5856 * Expects that skb->data points to the mac header and that no VLAN tags are
5857 * present.
5858 *
5859 * Returns 0 on success, -errno otherwise.
5860 */
5861int skb_eth_pop(struct sk_buff *skb)
5862{
5863 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
5864 skb_network_offset(skb) < ETH_HLEN)
5865 return -EPROTO;
5866
5867 skb_pull_rcsum(skb, ETH_HLEN);
5868 skb_reset_mac_header(skb);
5869 skb_reset_mac_len(skb);
5870
5871 return 0;
5872}
5873EXPORT_SYMBOL(skb_eth_pop);
5874
5875/**
5876 * skb_eth_push() - Add a new Ethernet header at the head of a packet
5877 *
5878 * @skb: Socket buffer to modify
5879 * @dst: Destination MAC address of the new header
5880 * @src: Source MAC address of the new header
5881 *
5882 * Prepend @skb with a new Ethernet header.
5883 *
5884 * Expects that skb->data points to the mac header, which must be empty.
5885 *
5886 * Returns 0 on success, -errno otherwise.
5887 */
5888int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
5889 const unsigned char *src)
5890{
5891 struct ethhdr *eth;
5892 int err;
5893
5894 if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
5895 return -EPROTO;
5896
5897 err = skb_cow_head(skb, sizeof(*eth));
5898 if (err < 0)
5899 return err;
5900
5901 skb_push(skb, sizeof(*eth));
5902 skb_reset_mac_header(skb);
5903 skb_reset_mac_len(skb);
5904
5905 eth = eth_hdr(skb);
5906 ether_addr_copy(eth->h_dest, dst);
5907 ether_addr_copy(eth->h_source, src);
5908 eth->h_proto = skb->protocol;
5909
5910 skb_postpush_rcsum(skb, eth, sizeof(*eth));
5911
5912 return 0;
5913}
5914EXPORT_SYMBOL(skb_eth_push);
5915
5916/* Update the ethertype of hdr and the skb csum value if required. */
5917static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
5918 __be16 ethertype)
5919{
5920 if (skb->ip_summed == CHECKSUM_COMPLETE) {
5921 __be16 diff[] = { ~hdr->h_proto, ethertype };
5922
5923 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5924 }
5925
5926 hdr->h_proto = ethertype;
5927}

/**
 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
 *                   the packet
 *
 * @skb: buffer
 * @mpls_lse: MPLS label stack entry to push
 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
 * @mac_len: length of the MAC header
 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
 *            ethernet
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
		  int mac_len, bool ethernet)
{
	struct mpls_shim_hdr *lse;
	int err;

	if (unlikely(!eth_p_mpls(mpls_proto)))
		return -EINVAL;

	/* The networking stack does not allow simultaneous tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -EINVAL;

	err = skb_cow_head(skb, MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb_network_offset(skb));
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, mac_len);
	skb_reset_mac_len(skb);

	lse = mpls_hdr(skb);
	lse->label_stack_entry = mpls_lse;
	skb_postpush_rcsum(skb, lse, MPLS_HLEN);

	if (ethernet && mac_len >= ETH_HLEN)
		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
	skb->protocol = mpls_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_push);
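
/*
 * Example (illustrative sketch, not part of the upstream file): push one
 * bottom-of-stack label onto an Ethernet frame. The label value and TTL
 * are hypothetical; the MPLS_LS_* shifts come from <linux/mpls.h>.
 */
static int __maybe_unused skb_mpls_push_example(struct sk_buff *skb)
{
	u32 lse = 100 << MPLS_LS_LABEL_SHIFT |	/* hypothetical label 100 */
		  1 << MPLS_LS_S_SHIFT |	/* bottom of stack */
		  64 << MPLS_LS_TTL_SHIFT;	/* initial TTL */

	/* expects skb->data at the mac header */
	return skb_mpls_push(skb, cpu_to_be32(lse), htons(ETH_P_MPLS_UC),
			     skb->mac_len, true);
}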

/**
 * skb_mpls_pop() - pop the outermost MPLS header
 *
 * @skb: buffer
 * @next_proto: ethertype of header after popped MPLS header
 * @mac_len: length of the MAC header
 * @ethernet: flag to indicate if the packet is ethernet
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
		 bool ethernet)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return 0;

	err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, mac_len);

	if (ethernet && mac_len >= ETH_HLEN) {
		struct ethhdr *hdr;

		/* use mpls_hdr() to locate the ethertype field, so that any
		 * VLAN tags between it and the mac header are accounted for.
		 */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		skb_mod_eth_type(skb, hdr, next_proto);
	}
	skb->protocol = next_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_pop);
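
/*
 * Example (illustrative sketch, not part of the upstream file): the MPLS
 * shim header carries no next-protocol field, so the caller must know what
 * follows the bottom of the stack; here IPv4 is assumed.
 */
static int __maybe_unused skb_mpls_pop_example(struct sk_buff *skb)
{
	return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
}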

/**
 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
 *
 * @skb: buffer
 * @mpls_lse: new MPLS label stack entry to update to
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	mpls_hdr(skb)->label_stack_entry = mpls_lse;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_update_lse);

/**
 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
 *
 * @skb: buffer
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_dec_ttl(struct sk_buff *skb)
{
	u32 lse;
	u8 ttl;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

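	/* LSE layout, host byte order after be32_to_cpu():
	 *   label[31:12] | TC[11:9] | S[8] | TTL[7:0]
	 */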
	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
	if (!--ttl)
		return -EINVAL;

	lse &= ~MPLS_LS_TTL_MASK;
	lse |= ttl << MPLS_LS_TTL_SHIFT;

	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
}
EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);

/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	int i;

	*errcode = -EMSGSIZE;
	/* Note: this test could be relaxed if we succeeded in allocating
	 * high-order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_mask);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
						   __GFP_COMP |
						   __GFP_NOWARN,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
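
/*
 * Example (illustrative sketch, not part of the upstream file): allocate
 * an skb with 128 linear bytes for headers and 32 KB of payload spread
 * over page frags, preferring order-3 pages. All sizes are hypothetical.
 */
static struct sk_buff * __maybe_unused alloc_paged_skb_example(void)
{
	struct sk_buff *skb;
	int errcode;

	skb = alloc_skb_with_frags(128, 32768, 3, &errcode, GFP_KERNEL);
	if (!skb)
		pr_debug("paged skb alloc failed: %d\n", errcode);
	return skb;
}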

/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
				    const int headlen, gfp_t gfp_mask)
{
	int i;
	int size = skb_end_offset(skb);
	int new_hlen = headlen - off;
	u8 *data;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy real data, and all frags */
	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
	skb->len -= off;

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info,
			frags[skb_shinfo(skb)->nr_frags]));
	if (skb_cloned(skb)) {
		/* drop the old head gracefully */
		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree(data);
			return -ENOMEM;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);
		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);
		skb_release_data(skb);
	} else {
		/* we can reuse the existing refcount - all we did was
		 * relocate values
		 */
		skb_free_head(skb);
	}

	skb->head = data;
	skb->data = data;
	skb->head_frag = 0;
	skb_set_end_offset(skb, size);
	skb_set_tail_pointer(skb, skb_headlen(skb));
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	return 0;
}

static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
static int pskb_carve_frag_list(struct sk_buff *skb,
				struct skb_shared_info *shinfo, int eat,
				gfp_t gfp_mask)
{
	struct sk_buff *list = shinfo->frag_list;
	struct sk_buff *clone = NULL;
	struct sk_buff *insp = NULL;

	do {
		if (!list) {
			pr_err("Not enough bytes to eat. Want %d\n", eat);
			return -EFAULT;
		}
		if (list->len <= eat) {
			/* Eaten as whole. */
			eat -= list->len;
			list = list->next;
			insp = list;
		} else {
			/* Eaten partially. */
			if (skb_shared(list)) {
				clone = skb_clone(list, gfp_mask);
				if (!clone)
					return -ENOMEM;
				insp = list->next;
				list = clone;
			} else {
				/* This may be pulled without problems. */
				insp = list;
			}
			if (pskb_carve(list, eat, gfp_mask) < 0) {
				kfree_skb(clone);
				return -ENOMEM;
			}
			break;
		}
	} while (eat);

	/* Free pulled out fragments. */
	while ((list = shinfo->frag_list) != insp) {
		shinfo->frag_list = list->next;
		consume_skb(list);
	}
	/* And insert new clone at head. */
	if (clone) {
		clone->next = list;
		shinfo->frag_list = clone;
	}
	return 0;
}

/* carve off the first off bytes from skb. The split line (off) lies in
 * the non-linear part of the skb
 */
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
				       int pos, gfp_t gfp_mask)
{
	int i, k = 0;
	int size = skb_end_offset(skb);
	u8 *data;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	struct skb_shared_info *shinfo;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
	if (skb_orphan_frags(skb, gfp_mask)) {
		kfree(data);
		return -ENOMEM;
	}
	shinfo = (struct skb_shared_info *)(data + size);
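
	/* Walk the old frags: frags that end at or before off are dropped,
	 * the rest are kept, and the first kept frag is trimmed when the
	 * split line lands inside it.
	 */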
	for (i = 0; i < nfrags; i++) {
		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + fsize > off) {
			shinfo->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < off) {
				/* Split frag.
				 * We have two options here:
				 * 1. Move the whole frag to the second part,
				 *    if possible; e.g. this approach is
				 *    mandatory for TUX, where splitting is
				 *    expensive.
				 * 2. Split accurately at the offset, which
				 *    is what we do here.
				 */
				skb_frag_off_add(&shinfo->frags[0], off - pos);
				skb_frag_size_sub(&shinfo->frags[0], off - pos);
			}
			skb_frag_ref(skb, i);
			k++;
		}
		pos += fsize;
	}
	shinfo->nr_frags = k;
	if (skb_has_frag_list(skb))
		skb_clone_fraglist(skb);

	/* split line is in frag list */
	if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
		if (skb_has_frag_list(skb))
			kfree_skb_list(skb_shinfo(skb)->frag_list);
		kfree(data);
		return -ENOMEM;
	}
	skb_release_data(skb);

	skb->head = data;
	skb->head_frag = 0;
	skb->data = data;
	skb_set_end_offset(skb, size);
	skb_reset_tail_pointer(skb);
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	skb->len -= off;
	skb->data_len = skb->len;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;
}

/* remove len bytes from the beginning of the skb */
static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
{
	int headlen = skb_headlen(skb);

	if (len < headlen)
		return pskb_carve_inside_header(skb, len, headlen, gfp);
	else
		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
}

/* Extract to_copy bytes starting at off from skb, and return them in
 * a new skb
 */
struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
			     int to_copy, gfp_t gfp)
{
	struct sk_buff *clone = skb_clone(skb, gfp);

	if (!clone)
		return NULL;

	if (pskb_carve(clone, off, gfp) < 0 ||
	    pskb_trim(clone, to_copy)) {
		kfree_skb(clone);
		return NULL;
	}
	return clone;
}
EXPORT_SYMBOL(pskb_extract);
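
/*
 * Example (illustrative sketch, not part of the upstream file): pull out a
 * 512-byte window starting 100 bytes into a packet. The original skb is
 * left untouched; only the clone is carved and trimmed. Offsets are
 * hypothetical.
 */
static struct sk_buff * __maybe_unused pskb_extract_example(struct sk_buff *skb)
{
	return pskb_extract(skb, 100, 512, GFP_ATOMIC);
}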

/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right now and adjust
 * truesize.
 * Notes:
 *	We do not reallocate skb->head, thus this cannot fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */
void skb_condense(struct sk_buff *skb)
{
	if (skb->data_len) {
		if (skb->data_len > skb->end - skb->tail ||
		    skb_cloned(skb))
			return;

		/* Nice, we can free page frag(s) right now */
		__pskb_pull_tail(skb, skb->data_len);
	}
	/* At this point, skb->truesize might be overestimated,
	 * because skb had a fragment, and fragments do not tell
	 * their truesize.
	 * When we pulled its content into skb->head, fragment
	 * was freed, but __pskb_pull_tail() could not possibly
	 * adjust skb->truesize, not knowing the frag truesize.
	 */
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
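
/*
 * Example (illustrative sketch, not part of the upstream file): a receive
 * path about to queue an skb for a long time can condense it first, then
 * charge the possibly reduced truesize to the owning socket.
 */
static void __maybe_unused queue_condensed_example(struct sock *sk,
						   struct sk_buff *skb)
{
	skb_condense(skb);
	/* skb_set_owner_r() charges the re-evaluated truesize to sk */
	skb_set_owner_r(skb, sk);
}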

#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}

/**
 * __skb_ext_alloc - allocate a new skb extensions storage
 *
 * @flags: See kmalloc().
 *
 * Returns the newly allocated pointer. The pointer can later be attached
 * to a skb via __skb_ext_set().
 * Note: caller must handle the skb_ext as opaque data.
 */
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);

	if (new) {
		memset(new->offset, 0, sizeof(new->offset));
		refcount_set(&new->refcnt, 1);
	}

	return new;
}

static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
					 unsigned int old_active)
{
	struct skb_ext *new;

	if (refcount_read(&old->refcnt) == 1)
		return old;

	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
	if (!new)
		return NULL;

	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
	refcount_set(&new->refcnt, 1);

#ifdef CONFIG_XFRM
	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
		unsigned int i;

		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
#endif
	__skb_ext_put(old);
	return new;
}

/**
 * __skb_ext_set - attach the specified extension storage to this skb
 * @skb: buffer
 * @id: extension id
 * @ext: extension storage previously allocated via __skb_ext_alloc()
 *
 * Existing extensions, if any, are cleared.
 *
 * Returns the pointer to the extension.
 */
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext)
{
	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);

	skb_ext_put(skb);
	newlen = newoff + skb_ext_type_len[id];
	ext->chunks = newlen;
	ext->offset[id] = newoff;
	skb->extensions = ext;
	skb->active_extensions = 1 << id;
	return skb_ext_get_ptr(ext, id);
}
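
/*
 * Example (illustrative sketch, not part of the upstream file): allocate
 * extension storage ahead of time, in a context where sleeping is fine,
 * then attach it with __skb_ext_set(). SKB_EXT_MPTCP is just one of the
 * well-known extension ids, hence the config guard.
 */
#if IS_ENABLED(CONFIG_MPTCP)
static void * __maybe_unused skb_ext_preattach_example(struct sk_buff *skb)
{
	struct skb_ext *ext = __skb_ext_alloc(GFP_KERNEL);

	if (!ext)
		return NULL;
	/* ext carries refcount 1; __skb_ext_set() hands it over to skb */
	return __skb_ext_set(skb, SKB_EXT_MPTCP, ext);
}
#endif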

/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of cloned buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
 */
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *new, *old = NULL;
	unsigned int newlen, newoff;

	if (skb->active_extensions) {
		old = skb->extensions;

		new = skb_ext_maybe_cow(old, skb->active_extensions);
		if (!new)
			return NULL;

		if (__skb_ext_exist(new, id))
			goto set_active;

		newoff = new->chunks;
	} else {
		newoff = SKB_EXT_CHUNKSIZEOF(*new);

		new = __skb_ext_alloc(GFP_ATOMIC);
		if (!new)
			return NULL;
	}

	newlen = newoff + skb_ext_type_len[id];
	new->chunks = newlen;
	new->offset[id] = newoff;
set_active:
	skb->slow_gro = 1;
	skb->extensions = new;
	skb->active_extensions |= 1 << id;
	return skb_ext_get_ptr(new, id);
}
EXPORT_SYMBOL(skb_ext_add);
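
/*
 * Example (illustrative sketch, not part of the upstream file): the usual
 * pattern is skb_ext_add() followed by filling in the returned area; on a
 * cloned skb the COW above guarantees the clone's extensions stay intact.
 * SKB_EXT_SEC_PATH exists only under CONFIG_XFRM, hence the guard.
 */
#ifdef CONFIG_XFRM
static struct sec_path * __maybe_unused skb_ext_add_example(struct sk_buff *skb)
{
	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);

	if (!sp)
		return NULL;	/* allocation failed, skb is unchanged */
	sp->len = 0;		/* start with an empty sec_path */
	return sp;
}
#endif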

#ifdef CONFIG_XFRM
static void skb_ext_put_sp(struct sec_path *sp)
{
	unsigned int i;

	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
}
#endif

#ifdef CONFIG_MCTP_FLOWS
static void skb_ext_put_mctp(struct mctp_flow *flow)
{
	if (flow->key)
		mctp_key_unref(flow->key);
}
#endif

void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *ext = skb->extensions;

	skb->active_extensions &= ~(1 << id);
	if (skb->active_extensions == 0) {
		skb->extensions = NULL;
		__skb_ext_put(ext);
#ifdef CONFIG_XFRM
	} else if (id == SKB_EXT_SEC_PATH &&
		   refcount_read(&ext->refcnt) == 1) {
		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);

		skb_ext_put_sp(sp);
		sp->len = 0;
#endif
	}
}
EXPORT_SYMBOL(__skb_ext_del);

void __skb_ext_put(struct skb_ext *ext)
{
	/* If this is the last reference, nothing can increment
	 * it after the check passes. Avoids one atomic op.
	 */
	if (refcount_read(&ext->refcnt) == 1)
		goto free_now;

	if (!refcount_dec_and_test(&ext->refcnt))
		return;
free_now:
#ifdef CONFIG_XFRM
	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
#endif
#ifdef CONFIG_MCTP_FLOWS
	if (__skb_ext_exist(ext, SKB_EXT_MCTP))
		skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP));
#endif

	kmem_cache_free(skbuff_ext_cache, ext);
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */

/**
 * skb_attempt_defer_free - queue skb for remote freeing
 * @skb: buffer
 *
 * Put @skb in a per-cpu list, using the cpu which
 * allocated the skb/pages to reduce false sharing
 * and memory zone spinlock contention.
 */
void skb_attempt_defer_free(struct sk_buff *skb)
{
	int cpu = skb->alloc_cpu;
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int defer_max;
	bool kick;

	if (WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
	    !cpu_online(cpu) ||
	    cpu == raw_smp_processor_id()) {
nodefer:	__kfree_skb(skb);
		return;
	}

	sd = &per_cpu(softnet_data, cpu);
	defer_max = READ_ONCE(sysctl_skb_defer_max);
	if (READ_ONCE(sd->defer_count) >= defer_max)
		goto nodefer;

	spin_lock_irqsave(&sd->defer_lock, flags);
	/* Send an IPI every time the queue reaches half capacity. */
	kick = sd->defer_count == (defer_max >> 1);
	/* Paired with the READ_ONCE() a few lines above */
	WRITE_ONCE(sd->defer_count, sd->defer_count + 1);

	skb->next = sd->defer_list;
	/* Paired with READ_ONCE() in skb_defer_free_flush() */
	WRITE_ONCE(sd->defer_list, skb);
	spin_unlock_irqrestore(&sd->defer_lock, flags);

	/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
	 * if we are unlucky enough (this seems very unlikely).
	 */
	if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
		smp_call_function_single_async(cpu, &sd->defer_csd);
}
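
/*
 * Example (illustrative sketch, not part of the upstream file): a protocol
 * receive path consuming packets on a CPU other than the one that allocated
 * them can simply hand each skb to skb_attempt_defer_free(); locally
 * allocated or over-quota skbs fall back to an immediate __kfree_skb().
 */
static void __maybe_unused consume_rx_skb_example(struct sk_buff *skb)
{
	skb_attempt_defer_free(skb);
}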