1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Routines having to do with the 'struct sk_buff' memory handlers.
4 *
5 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
6 * Florian La Roche <rzsfl@rz.uni-sb.de>
7 *
8 * Fixes:
9 * Alan Cox : Fixed the worst of the load
10 * balancer bugs.
11 * Dave Platt : Interrupt stacking fix.
12 * Richard Kooijman : Timestamp fixes.
13 * Alan Cox : Changed buffer format.
14 * Alan Cox : destructor hook for AF_UNIX etc.
15 * Linus Torvalds : Better skb_clone.
16 * Alan Cox : Added skb_copy.
17 * Alan Cox : Added all the changed routines Linus
18 * only put in the headers
19 * Ray VanTassle : Fixed --skb->lock in free
20 * Alan Cox : skb_copy copy arp field
21 * Andi Kleen : slabified it.
22 * Robert Olsson : Removed skb_head_pool
23 *
24 * NOTE:
25 * The __skb_ routines should be called with interrupts
26 * disabled, or you better be *real* sure that the operation is atomic
27 * with respect to whatever list is being frobbed (e.g. via lock_sock()
28 * or via disabling bottom half handlers, etc).
29 */
30
31/*
32 * The functions in this file will not compile correctly with gcc 2.4.x
33 */
34
35#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36
37#include <linux/module.h>
38#include <linux/types.h>
39#include <linux/kernel.h>
40#include <linux/mm.h>
41#include <linux/interrupt.h>
42#include <linux/in.h>
43#include <linux/inet.h>
44#include <linux/slab.h>
45#include <linux/tcp.h>
46#include <linux/udp.h>
47#include <linux/sctp.h>
48#include <linux/netdevice.h>
49#ifdef CONFIG_NET_CLS_ACT
50#include <net/pkt_sched.h>
51#endif
52#include <linux/string.h>
53#include <linux/skbuff.h>
54#include <linux/splice.h>
55#include <linux/cache.h>
56#include <linux/rtnetlink.h>
57#include <linux/init.h>
58#include <linux/scatterlist.h>
59#include <linux/errqueue.h>
60#include <linux/prefetch.h>
61#include <linux/bitfield.h>
62#include <linux/if_vlan.h>
63#include <linux/mpls.h>
64#include <linux/kcov.h>
65#include <linux/iov_iter.h>
66
67#include <net/protocol.h>
68#include <net/dst.h>
69#include <net/sock.h>
70#include <net/checksum.h>
71#include <net/gso.h>
72#include <net/ip6_checksum.h>
73#include <net/xfrm.h>
74#include <net/mpls.h>
75#include <net/mptcp.h>
76#include <net/mctp.h>
77#include <net/page_pool/helpers.h>
78#include <net/dropreason.h>
79
80#include <linux/uaccess.h>
81#include <trace/events/skb.h>
82#include <linux/highmem.h>
83#include <linux/capability.h>
84#include <linux/user_namespace.h>
85#include <linux/indirect_call_wrapper.h>
86#include <linux/textsearch.h>
87
88#include "dev.h"
89#include "sock_destructor.h"
90
91struct kmem_cache *skbuff_cache __ro_after_init;
92static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
93#ifdef CONFIG_SKB_EXTENSIONS
94static struct kmem_cache *skbuff_ext_cache __ro_after_init;
95#endif
96
97
98static struct kmem_cache *skb_small_head_cache __ro_after_init;
99
100#define SKB_SMALL_HEAD_SIZE SKB_HEAD_ALIGN(MAX_TCP_HEADER)
101
102/* We want SKB_SMALL_HEAD_CACHE_SIZE to not be a power of two.
103 * This should ensure that SKB_SMALL_HEAD_HEADROOM is a unique
104 * size, and we can differentiate heads from skb_small_head_cache
105 * vs system slabs by looking at their size (skb_end_offset()).
106 */
107#define SKB_SMALL_HEAD_CACHE_SIZE \
108 (is_power_of_2(SKB_SMALL_HEAD_SIZE) ? \
109 (SKB_SMALL_HEAD_SIZE + L1_CACHE_BYTES) : \
110 SKB_SMALL_HEAD_SIZE)
111
112#define SKB_SMALL_HEAD_HEADROOM \
113 SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE)
114
115int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
116EXPORT_SYMBOL(sysctl_max_skb_frags);
117
118#undef FN
119#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
120static const char * const drop_reasons[] = {
121 [SKB_CONSUMED] = "CONSUMED",
122 DEFINE_DROP_REASON(FN, FN)
123};
124
125static const struct drop_reason_list drop_reasons_core = {
126 .reasons = drop_reasons,
127 .n_reasons = ARRAY_SIZE(drop_reasons),
128};
129
130const struct drop_reason_list __rcu *
131drop_reasons_by_subsys[SKB_DROP_REASON_SUBSYS_NUM] = {
132 [SKB_DROP_REASON_SUBSYS_CORE] = RCU_INITIALIZER(&drop_reasons_core),
133};
134EXPORT_SYMBOL(drop_reasons_by_subsys);
135
136/**
137 * drop_reasons_register_subsys - register another drop reason subsystem
138 * @subsys: the subsystem to register, must not be the core
139 * @list: the list of drop reasons within the subsystem, must point to
140 * a statically initialized list
141 */
142void drop_reasons_register_subsys(enum skb_drop_reason_subsys subsys,
143 const struct drop_reason_list *list)
144{
145 if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
146 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
147 "invalid subsystem %d\n", subsys))
148 return;
149
150 /* must point to statically allocated memory, so INIT is OK */
151 RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], list);
152}
153EXPORT_SYMBOL_GPL(drop_reasons_register_subsys);
154
155/**
156 * drop_reasons_unregister_subsys - unregister a drop reason subsystem
157 * @subsys: the subsystem to remove, must not be the core
158 *
159 * Note: This will synchronize_rcu() to ensure no users when it returns.
160 */
161void drop_reasons_unregister_subsys(enum skb_drop_reason_subsys subsys)
162{
163 if (WARN(subsys <= SKB_DROP_REASON_SUBSYS_CORE ||
164 subsys >= ARRAY_SIZE(drop_reasons_by_subsys),
165 "invalid subsystem %d\n", subsys))
166 return;
167
168 RCU_INIT_POINTER(drop_reasons_by_subsys[subsys], NULL);
169
170 synchronize_rcu();
171}
172EXPORT_SYMBOL_GPL(drop_reasons_unregister_subsys);
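/* Illustrative sketch (not part of the original file): how a subsystem
 * might publish its own drop reason strings and register them with the
 * core. The subsystem id EXAMPLE_SUBSYS and the reason names are
 * hypothetical; a real user passes one of the SKB_DROP_REASON_SUBSYS_*
 * values reserved for it, together with a statically allocated list.
 */
#if 0
static const char * const example_drop_reasons[] = {
	"EXAMPLE_BAD_HEADER",
	"EXAMPLE_NO_PEER",
};

static const struct drop_reason_list example_drop_reason_list = {
	.reasons = example_drop_reasons,
	.n_reasons = ARRAY_SIZE(example_drop_reasons),
};

static int __init example_init(void)
{
	drop_reasons_register_subsys(EXAMPLE_SUBSYS, &example_drop_reason_list);
	return 0;
}

static void __exit example_exit(void)
{
	/* synchronize_rcu() inside waits for readers before the list goes away */
	drop_reasons_unregister_subsys(EXAMPLE_SUBSYS);
}
#endif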
173
174/**
175 * skb_panic - private function for out-of-line support
176 * @skb: buffer
177 * @sz: size
178 * @addr: address
179 * @msg: skb_over_panic or skb_under_panic
180 *
181 * Out-of-line support for skb_put() and skb_push().
182 * Called via the wrapper skb_over_panic() or skb_under_panic().
183 * Keep out of line to prevent kernel bloat.
184 * __builtin_return_address is not used because it is not always reliable.
185 */
186static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
187 const char msg[])
188{
189 pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
190 msg, addr, skb->len, sz, skb->head, skb->data,
191 (unsigned long)skb->tail, (unsigned long)skb->end,
192 skb->dev ? skb->dev->name : "<NULL>");
193 BUG();
194}
195
196static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
197{
198 skb_panic(skb, sz, addr, __func__);
199}
200
201static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
202{
203 skb_panic(skb, sz, addr, __func__);
204}
205
206#define NAPI_SKB_CACHE_SIZE 64
207#define NAPI_SKB_CACHE_BULK 16
208#define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2)
209
210#if PAGE_SIZE == SZ_4K
211
212#define NAPI_HAS_SMALL_PAGE_FRAG 1
213#define NAPI_SMALL_PAGE_PFMEMALLOC(nc) ((nc).pfmemalloc)
214
215/* Specialized page frag allocator using a single order-0 page
216 * and slicing it into 1K sized fragments. Constrained to systems
217 * where only a limited number of 1K fragments fit in a single
218 * page - to avoid excessive truesize underestimation.
219 */
220
221struct page_frag_1k {
222 void *va;
223 u16 offset;
224 bool pfmemalloc;
225};
226
227static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp)
228{
229 struct page *page;
230 int offset;
231
232 offset = nc->offset - SZ_1K;
233 if (likely(offset >= 0))
234 goto use_frag;
235
236 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
237 if (!page)
238 return NULL;
239
240 nc->va = page_address(page);
241 nc->pfmemalloc = page_is_pfmemalloc(page);
242 offset = PAGE_SIZE - SZ_1K;
243 page_ref_add(page, offset / SZ_1K);
244
245use_frag:
246 nc->offset = offset;
247 return nc->va + offset;
248}
249#else
250
251/* The small page is actually unused in this build; add dummy helpers
252 * to please the compiler and to avoid preprocessor conditionals later on.
253 */
254#define NAPI_HAS_SMALL_PAGE_FRAG 0
255#define NAPI_SMALL_PAGE_PFMEMALLOC(nc) false
256
257struct page_frag_1k {
258};
259
260static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask)
261{
262 return NULL;
263}
264
265#endif
266
267struct napi_alloc_cache {
268 struct page_frag_cache page;
269 struct page_frag_1k page_small;
270 unsigned int skb_count;
271 void *skb_cache[NAPI_SKB_CACHE_SIZE];
272};
273
274static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
275static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
276
277/* Double check that napi_get_frags() allocates skbs with
278 * skb->head being backed by slab, not a page fragment.
279 * This is to make sure the bug fixed in 3226b158e67c
280 * ("net: avoid 32 x truesize under-estimation for tiny skbs")
281 * does not accidentally come back.
282 */
283void napi_get_frags_check(struct napi_struct *napi)
284{
285 struct sk_buff *skb;
286
287 local_bh_disable();
288 skb = napi_get_frags(napi);
289 WARN_ON_ONCE(!NAPI_HAS_SMALL_PAGE_FRAG && skb && skb->head_frag);
290 napi_free_frags(napi);
291 local_bh_enable();
292}
293
294void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
295{
296 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
297
298 fragsz = SKB_DATA_ALIGN(fragsz);
299
300 return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
301}
302EXPORT_SYMBOL(__napi_alloc_frag_align);
303
304void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
305{
306 void *data;
307
308 fragsz = SKB_DATA_ALIGN(fragsz);
309 if (in_hardirq() || irqs_disabled()) {
310 struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
311
312 data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
313 } else {
314 struct napi_alloc_cache *nc;
315
316 local_bh_disable();
317 nc = this_cpu_ptr(&napi_alloc_cache);
318 data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
319 local_bh_enable();
320 }
321 return data;
322}
323EXPORT_SYMBOL(__netdev_alloc_frag_align);
324
325static struct sk_buff *napi_skb_cache_get(void)
326{
327 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
328 struct sk_buff *skb;
329
330 if (unlikely(!nc->skb_count)) {
331 nc->skb_count = kmem_cache_alloc_bulk(skbuff_cache,
332 GFP_ATOMIC,
333 NAPI_SKB_CACHE_BULK,
334 nc->skb_cache);
335 if (unlikely(!nc->skb_count))
336 return NULL;
337 }
338
339 skb = nc->skb_cache[--nc->skb_count];
340 kasan_unpoison_object_data(skbuff_cache, skb);
341
342 return skb;
343}
344
345static inline void __finalize_skb_around(struct sk_buff *skb, void *data,
346 unsigned int size)
347{
348 struct skb_shared_info *shinfo;
349
350 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
351
352 /* Assumes caller memset cleared SKB */
353 skb->truesize = SKB_TRUESIZE(size);
354 refcount_set(&skb->users, 1);
355 skb->head = data;
356 skb->data = data;
357 skb_reset_tail_pointer(skb);
358 skb_set_end_offset(skb, size);
359 skb->mac_header = (typeof(skb->mac_header))~0U;
360 skb->transport_header = (typeof(skb->transport_header))~0U;
361 skb->alloc_cpu = raw_smp_processor_id();
362 /* make sure we initialize shinfo sequentially */
363 shinfo = skb_shinfo(skb);
364 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
365 atomic_set(&shinfo->dataref, 1);
366
367 skb_set_kcov_handle(skb, kcov_common_handle());
368}
369
370static inline void *__slab_build_skb(struct sk_buff *skb, void *data,
371 unsigned int *size)
372{
373 void *resized;
374
375 /* Must find the allocation size (and grow it to match). */
376 *size = ksize(data);
377 /* krealloc() will immediately return "data" when
378 * "ksize(data)" is requested: it is the existing upper
379 * bound. As a result, GFP_ATOMIC will be ignored. Note
380 * that this "new" pointer needs to be passed back to the
381 * caller for use so the __alloc_size hinting will be
382 * tracked correctly.
383 */
384 resized = krealloc(data, *size, GFP_ATOMIC);
385 WARN_ON_ONCE(resized != data);
386 return resized;
387}
388
389/* build_skb() variant which can operate on slab buffers.
390 * Note that this should be used sparingly as slab buffers
391 * cannot be combined efficiently by GRO!
392 */
393struct sk_buff *slab_build_skb(void *data)
394{
395 struct sk_buff *skb;
396 unsigned int size;
397
398 skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
399 if (unlikely(!skb))
400 return NULL;
401
402 memset(skb, 0, offsetof(struct sk_buff, tail));
403 data = __slab_build_skb(skb, data, &size);
404 __finalize_skb_around(skb, data, size);
405
406 return skb;
407}
408EXPORT_SYMBOL(slab_build_skb);
409
410/* Caller must provide SKB that is memset cleared */
411static void __build_skb_around(struct sk_buff *skb, void *data,
412 unsigned int frag_size)
413{
414 unsigned int size = frag_size;
415
416 /* frag_size == 0 is considered deprecated now. Callers
417 * using slab buffer should use slab_build_skb() instead.
418 */
419 if (WARN_ONCE(size == 0, "Use slab_build_skb() instead"))
420 data = __slab_build_skb(skb, data, &size);
421
422 __finalize_skb_around(skb, data, size);
423}
424
425/**
426 * __build_skb - build a network buffer
427 * @data: data buffer provided by caller
428 * @frag_size: size of data (must not be 0)
429 *
430 * Allocate a new &sk_buff. Caller provides space holding head and
431 * skb_shared_info. @data must have been allocated from the page
432 * allocator or vmalloc(). (A @frag_size of 0 to indicate a kmalloc()
433 * allocation is deprecated, and callers should use slab_build_skb()
434 * instead.)
435 * The return is the new skb buffer.
436 * On a failure the return is %NULL, and @data is not freed.
437 * Notes:
438 * Before IO, the driver allocates only the data buffer where the NIC puts
439 * the incoming frame. The driver should add room at head (NET_SKB_PAD) and
440 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
441 * After IO, the driver calls build_skb() to allocate the sk_buff and
442 * populate it before giving the packet to the stack.
443 * RX rings only contain data buffers, not full skbs.
444 */
445struct sk_buff *__build_skb(void *data, unsigned int frag_size)
446{
447 struct sk_buff *skb;
448
449 skb = kmem_cache_alloc(skbuff_cache, GFP_ATOMIC);
450 if (unlikely(!skb))
451 return NULL;
452
453 memset(skb, 0, offsetof(struct sk_buff, tail));
454 __build_skb_around(skb, data, frag_size);
455
456 return skb;
457}
458
459/* build_skb() is wrapper over __build_skb(), that specifically
460 * takes care of skb->head and skb->pfmemalloc
461 */
462struct sk_buff *build_skb(void *data, unsigned int frag_size)
463{
464 struct sk_buff *skb = __build_skb(data, frag_size);
465
466 if (likely(skb && frag_size)) {
467 skb->head_frag = 1;
468 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
469 }
470 return skb;
471}
472EXPORT_SYMBOL(build_skb);
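/* Illustrative sketch (not part of the original file): the typical driver
 * RX pattern described in the __build_skb() notes above. The buffer is
 * assumed to come from the page allocator with headroom and tailroom
 * already reserved at ring-fill time; example_rx_to_skb() is a
 * hypothetical helper name.
 */
#if 0
static struct sk_buff *example_rx_to_skb(void *buf, unsigned int frame_len)
{
	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + frame_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;

	skb = build_skb(buf, truesize);
	if (unlikely(!skb))
		return NULL;			/* buf is NOT freed on failure */

	skb_reserve(skb, NET_SKB_PAD);		/* skip the reserved headroom */
	skb_put(skb, frame_len);		/* frame was written by the NIC */
	return skb;
}
#endif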
473
474/**
475 * build_skb_around - build a network buffer around provided skb
476 * @skb: sk_buff provide by caller, must be memset cleared
477 * @data: data buffer provided by caller
478 * @frag_size: size of data
479 */
480struct sk_buff *build_skb_around(struct sk_buff *skb,
481 void *data, unsigned int frag_size)
482{
483 if (unlikely(!skb))
484 return NULL;
485
486 __build_skb_around(skb, data, frag_size);
487
488 if (frag_size) {
489 skb->head_frag = 1;
490 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
491 }
492 return skb;
493}
494EXPORT_SYMBOL(build_skb_around);
495
496/**
497 * __napi_build_skb - build a network buffer
498 * @data: data buffer provided by caller
499 * @frag_size: size of data
500 *
501 * Version of __build_skb() that uses NAPI percpu caches to obtain
502 * skbuff_head instead of inplace allocation.
503 *
504 * Returns a new &sk_buff on success, %NULL on allocation failure.
505 */
506static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
507{
508 struct sk_buff *skb;
509
510 skb = napi_skb_cache_get();
511 if (unlikely(!skb))
512 return NULL;
513
514 memset(skb, 0, offsetof(struct sk_buff, tail));
515 __build_skb_around(skb, data, frag_size);
516
517 return skb;
518}
519
520/**
521 * napi_build_skb - build a network buffer
522 * @data: data buffer provided by caller
523 * @frag_size: size of data
524 *
525 * Version of __napi_build_skb() that takes care of skb->head_frag
526 * and skb->pfmemalloc when the data is a page or page fragment.
527 *
528 * Returns a new &sk_buff on success, %NULL on allocation failure.
529 */
530struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
531{
532 struct sk_buff *skb = __napi_build_skb(data, frag_size);
533
534 if (likely(skb) && frag_size) {
535 skb->head_frag = 1;
536 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
537 }
538
539 return skb;
540}
541EXPORT_SYMBOL(napi_build_skb);
542
543/*
544 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
545 * the caller if emergency pfmemalloc reserves are being used. If it is and
546 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
547 * may be used. Otherwise, the packet data may be discarded until enough
548 * memory is free.
549 */
550static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
551 bool *pfmemalloc)
552{
553 bool ret_pfmemalloc = false;
554 size_t obj_size;
555 void *obj;
556
557 obj_size = SKB_HEAD_ALIGN(*size);
558 if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
559 !(flags & KMALLOC_NOT_NORMAL_BITS)) {
560 obj = kmem_cache_alloc_node(skb_small_head_cache,
561 flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
562 node);
563 *size = SKB_SMALL_HEAD_CACHE_SIZE;
564 if (obj || !(gfp_pfmemalloc_allowed(flags)))
565 goto out;
566 /* Try again but now we are using pfmemalloc reserves */
567 ret_pfmemalloc = true;
568 obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
569 goto out;
570 }
571
572 obj_size = kmalloc_size_roundup(obj_size);
573 /* The following cast might truncate high-order bits of obj_size, this
574 * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
575 */
576 *size = (unsigned int)obj_size;
577
578 /*
579 * Try a regular allocation, when that fails and we're not entitled
580 * to the reserves, fail.
581 */
582 obj = kmalloc_node_track_caller(obj_size,
583 flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
584 node);
585 if (obj || !(gfp_pfmemalloc_allowed(flags)))
586 goto out;
587
588 /* Try again but now we are using pfmemalloc reserves */
589 ret_pfmemalloc = true;
590 obj = kmalloc_node_track_caller(obj_size, flags, node);
591
592out:
593 if (pfmemalloc)
594 *pfmemalloc = ret_pfmemalloc;
595
596 return obj;
597}
598
599/* Allocate a new skbuff. We do this ourselves so we can fill in a few
600 * 'private' fields and also do memory statistics to find all the
601 * [BEEP] leaks.
602 *
603 */
604
605/**
606 * __alloc_skb - allocate a network buffer
607 * @size: size to allocate
608 * @gfp_mask: allocation mask
609 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
610 * instead of head cache and allocate a cloned (child) skb.
611 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
612 * allocations in case the data is required for writeback
613 * @node: numa node to allocate memory on
614 *
615 * Allocate a new &sk_buff. The returned buffer has no headroom and a
616 * tail room of at least size bytes. The object has a reference count
617 * of one. The return is the buffer. On a failure the return is %NULL.
618 *
619 * Buffers may only be allocated from interrupts using a @gfp_mask of
620 * %GFP_ATOMIC.
621 */
622struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
623 int flags, int node)
624{
625 struct kmem_cache *cache;
626 struct sk_buff *skb;
627 bool pfmemalloc;
628 u8 *data;
629
630 cache = (flags & SKB_ALLOC_FCLONE)
631 ? skbuff_fclone_cache : skbuff_cache;
632
633 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
634 gfp_mask |= __GFP_MEMALLOC;
635
636 /* Get the HEAD */
637 if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
638 likely(node == NUMA_NO_NODE || node == numa_mem_id()))
639 skb = napi_skb_cache_get();
640 else
641 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
642 if (unlikely(!skb))
643 return NULL;
644 prefetchw(skb);
645
646 /* We do our best to align skb_shared_info on a separate cache
647 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
648 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
649 * Both skb->head and skb_shared_info are cache line aligned.
650 */
651 data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
652 if (unlikely(!data))
653 goto nodata;
654 /* kmalloc_size_roundup() might give us more room than requested.
655 * Put skb_shared_info exactly at the end of allocated zone,
656 * to allow max possible filling before reallocation.
657 */
658 prefetchw(data + SKB_WITH_OVERHEAD(size));
659
660 /*
661 * Only clear those fields we need to clear, not those that we will
662 * actually initialise below. Hence, don't put any more fields after
663 * the tail pointer in struct sk_buff!
664 */
665 memset(skb, 0, offsetof(struct sk_buff, tail));
666 __build_skb_around(skb, data, size);
667 skb->pfmemalloc = pfmemalloc;
668
669 if (flags & SKB_ALLOC_FCLONE) {
670 struct sk_buff_fclones *fclones;
671
672 fclones = container_of(skb, struct sk_buff_fclones, skb1);
673
674 skb->fclone = SKB_FCLONE_ORIG;
675 refcount_set(&fclones->fclone_ref, 1);
676 }
677
678 return skb;
679
680nodata:
681 kmem_cache_free(cache, skb);
682 return NULL;
683}
684EXPORT_SYMBOL(__alloc_skb);
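/* Illustrative sketch (not part of the original file): the common
 * alloc_skb() pattern for a locally generated packet. Header and payload
 * sizes are placeholders; alloc_skb() is the flags=0 wrapper around
 * __alloc_skb() declared in <linux/skbuff.h>.
 */
#if 0
static struct sk_buff *example_build_tx_skb(unsigned int hlen,
					    const void *payload,
					    unsigned int plen)
{
	struct sk_buff *skb = alloc_skb(hlen + plen, GFP_KERNEL);

	if (!skb)
		return NULL;

	skb_reserve(skb, hlen);			/* room for protocol headers */
	skb_put_data(skb, payload, plen);	/* copy the payload */
	/* headers are later prepended with skb_push() */
	return skb;
}
#endif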
685
686/**
687 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
688 * @dev: network device to receive on
689 * @len: length to allocate
690 * @gfp_mask: get_free_pages mask, passed to alloc_skb
691 *
692 * Allocate a new &sk_buff and assign it a usage count of one. The
693 * buffer has NET_SKB_PAD headroom built in. Users should allocate
694 * the headroom they think they need without accounting for the
695 * built in space. The built in space is used for optimisations.
696 *
697 * %NULL is returned if there is no free memory.
698 */
699struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
700 gfp_t gfp_mask)
701{
702 struct page_frag_cache *nc;
703 struct sk_buff *skb;
704 bool pfmemalloc;
705 void *data;
706
707 len += NET_SKB_PAD;
708
709 /* If requested length is either too small or too big,
710 * we use kmalloc() for skb->head allocation.
711 */
712 if (len <= SKB_WITH_OVERHEAD(1024) ||
713 len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
714 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
715 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
716 if (!skb)
717 goto skb_fail;
718 goto skb_success;
719 }
720
721 len = SKB_HEAD_ALIGN(len);
722
723 if (sk_memalloc_socks())
724 gfp_mask |= __GFP_MEMALLOC;
725
726 if (in_hardirq() || irqs_disabled()) {
727 nc = this_cpu_ptr(&netdev_alloc_cache);
728 data = page_frag_alloc(nc, len, gfp_mask);
729 pfmemalloc = nc->pfmemalloc;
730 } else {
731 local_bh_disable();
732 nc = this_cpu_ptr(&napi_alloc_cache.page);
733 data = page_frag_alloc(nc, len, gfp_mask);
734 pfmemalloc = nc->pfmemalloc;
735 local_bh_enable();
736 }
737
738 if (unlikely(!data))
739 return NULL;
740
741 skb = __build_skb(data, len);
742 if (unlikely(!skb)) {
743 skb_free_frag(data);
744 return NULL;
745 }
746
747 if (pfmemalloc)
748 skb->pfmemalloc = 1;
749 skb->head_frag = 1;
750
751skb_success:
752 skb_reserve(skb, NET_SKB_PAD);
753 skb->dev = dev;
754
755skb_fail:
756 return skb;
757}
758EXPORT_SYMBOL(__netdev_alloc_skb);
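/* Illustrative sketch (not part of the original file): how a driver RX
 * path might use netdev_alloc_skb(). NET_SKB_PAD headroom is built in, so
 * the driver only reserves what it needs on top of it (here NET_IP_ALIGN).
 * example_rx_copybreak() is a hypothetical helper name and eth_type_trans()
 * comes from <linux/etherdevice.h>.
 */
#if 0
static struct sk_buff *example_rx_copybreak(struct net_device *dev,
					    const void *frame, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len + NET_IP_ALIGN);

	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
	skb_put_data(skb, frame, len);
	skb->protocol = eth_type_trans(skb, dev);
	return skb;
}
#endif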
759
760/**
761 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
762 * @napi: napi instance this buffer was allocated for
763 * @len: length to allocate
764 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
765 *
766 * Allocate a new sk_buff for use in NAPI receive. This buffer will
767 * attempt to allocate the head from a special reserved region used
768 * only for NAPI Rx allocation. By doing this we can save several
769 * CPU cycles by avoiding having to disable and re-enable IRQs.
770 *
771 * %NULL is returned if there is no free memory.
772 */
773struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
774 gfp_t gfp_mask)
775{
776 struct napi_alloc_cache *nc;
777 struct sk_buff *skb;
778 bool pfmemalloc;
779 void *data;
780
781 DEBUG_NET_WARN_ON_ONCE(!in_softirq());
782 len += NET_SKB_PAD + NET_IP_ALIGN;
783
784 /* If requested length is either too small or too big,
785 * we use kmalloc() for skb->head allocation.
786 * When the small frag allocator is available, prefer it over kmalloc
787 * for small fragments.
788 */
789 if ((!NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) ||
790 len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
791 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
792 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
793 NUMA_NO_NODE);
794 if (!skb)
795 goto skb_fail;
796 goto skb_success;
797 }
798
799 nc = this_cpu_ptr(&napi_alloc_cache);
800
801 if (sk_memalloc_socks())
802 gfp_mask |= __GFP_MEMALLOC;
803
804 if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) {
805 /* We are artificially inflating the allocation size, but
806 * that is not as bad as it may look, because:
807 * - 'len' less than GRO_MAX_HEAD makes little sense
808 * - On most systems, larger 'len' values lead to fragment
809 * sizes above 512 bytes
810 * - kmalloc would use the kmalloc-1k slab for such values
811 * - Builds with a smaller GRO_MAX_HEAD will very likely do
812 * little networking, as that implies no WiFi and no
813 * tunnel support, and 32-bit arches.
814 */
815 len = SZ_1K;
816
817 data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
818 pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
819 } else {
820 len = SKB_HEAD_ALIGN(len);
821
822 data = page_frag_alloc(&nc->page, len, gfp_mask);
823 pfmemalloc = nc->page.pfmemalloc;
824 }
825
826 if (unlikely(!data))
827 return NULL;
828
829 skb = __napi_build_skb(data, len);
830 if (unlikely(!skb)) {
831 skb_free_frag(data);
832 return NULL;
833 }
834
835 if (pfmemalloc)
836 skb->pfmemalloc = 1;
837 skb->head_frag = 1;
838
839skb_success:
840 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
841 skb->dev = napi->dev;
842
843skb_fail:
844 return skb;
845}
846EXPORT_SYMBOL(__napi_alloc_skb);
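/* Illustrative sketch (not part of the original file): allocating from the
 * NAPI caches inside a poll routine via the napi_alloc_skb() wrapper
 * (GFP_ATOMIC, declared in <linux/skbuff.h>). The copy-based RX path and
 * the helper name are placeholders.
 */
#if 0
static struct sk_buff *example_napi_rx(struct napi_struct *napi,
				       const void *frame, unsigned int len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, len);

	if (unlikely(!skb))
		return NULL;

	skb_put_data(skb, frame, len);
	return skb;
}
#endif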
847
848void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
849 int size, unsigned int truesize)
850{
851 DEBUG_NET_WARN_ON_ONCE(size > truesize);
852
853 skb_fill_page_desc(skb, i, page, off, size);
854 skb->len += size;
855 skb->data_len += size;
856 skb->truesize += truesize;
857}
858EXPORT_SYMBOL(skb_add_rx_frag);
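/* Illustrative sketch (not part of the original file): attaching a page
 * fragment received by the NIC to an skb. The fragment geometry is a
 * placeholder; truesize should reflect the full RX buffer backing the
 * fragment, not just the bytes received.
 */
#if 0
static void example_attach_frag(struct sk_buff *skb, struct page *page,
				unsigned int offset, unsigned int frag_len,
				unsigned int rx_buf_size)
{
	/* len, data_len and truesize are all updated by the helper */
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
			offset, frag_len, rx_buf_size);
}
#endif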
859
860void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
861 unsigned int truesize)
862{
863 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
864
865 DEBUG_NET_WARN_ON_ONCE(size > truesize);
866
867 skb_frag_size_add(frag, size);
868 skb->len += size;
869 skb->data_len += size;
870 skb->truesize += truesize;
871}
872EXPORT_SYMBOL(skb_coalesce_rx_frag);
873
874static void skb_drop_list(struct sk_buff **listp)
875{
876 kfree_skb_list(*listp);
877 *listp = NULL;
878}
879
880static inline void skb_drop_fraglist(struct sk_buff *skb)
881{
882 skb_drop_list(&skb_shinfo(skb)->frag_list);
883}
884
885static void skb_clone_fraglist(struct sk_buff *skb)
886{
887 struct sk_buff *list;
888
889 skb_walk_frags(skb, list)
890 skb_get(list);
891}
892
893#if IS_ENABLED(CONFIG_PAGE_POOL)
894bool napi_pp_put_page(struct page *page, bool napi_safe)
895{
896 bool allow_direct = false;
897 struct page_pool *pp;
898
899 page = compound_head(page);
900
901 /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation
902 * in order to preserve any existing bits, such as bit 0 for the
903 * head page of compound page and bit 1 for pfmemalloc page, so
904 * mask those bits for freeing side when doing below checking,
905 * and page_is_pfmemalloc() is checked in __page_pool_put_page()
906 * to avoid recycling the pfmemalloc page.
907 */
908 if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
909 return false;
910
911 pp = page->pp;
912
913 /* Allow direct recycle if we have reasons to believe that we are
914 * in the same context as the consumer would run, so there's
915 * no possible race.
916 * __page_pool_put_page() makes sure we're not in hardirq context
917 * and interrupts are enabled prior to accessing the cache.
918 */
919 if (napi_safe || in_softirq()) {
920 const struct napi_struct *napi = READ_ONCE(pp->p.napi);
921
922 allow_direct = napi &&
923 READ_ONCE(napi->list_owner) == smp_processor_id();
924 }
925
926 /* The driver set this to memory recycling info. Reset it on recycle.
927 * This will *not* work for NICs using a split-page memory model.
928 * The page will be returned to the pool here regardless of the
929 * 'flipped' fragment being in use or not.
930 */
931 page_pool_put_full_page(pp, page, allow_direct);
932
933 return true;
934}
935EXPORT_SYMBOL(napi_pp_put_page);
936#endif
937
938static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
939{
940 if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
941 return false;
942 return napi_pp_put_page(virt_to_page(data), napi_safe);
943}
944
945static void skb_kfree_head(void *head, unsigned int end_offset)
946{
947 if (end_offset == SKB_SMALL_HEAD_HEADROOM)
948 kmem_cache_free(skb_small_head_cache, head);
949 else
950 kfree(head);
951}
952
953static void skb_free_head(struct sk_buff *skb, bool napi_safe)
954{
955 unsigned char *head = skb->head;
956
957 if (skb->head_frag) {
958 if (skb_pp_recycle(skb, head, napi_safe))
959 return;
960 skb_free_frag(head);
961 } else {
962 skb_kfree_head(head, skb_end_offset(skb));
963 }
964}
965
966static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
967 bool napi_safe)
968{
969 struct skb_shared_info *shinfo = skb_shinfo(skb);
970 int i;
971
972 if (skb->cloned &&
973 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
974 &shinfo->dataref))
975 goto exit;
976
977 if (skb_zcopy(skb)) {
978 bool skip_unref = shinfo->flags & SKBFL_MANAGED_FRAG_REFS;
979
980 skb_zcopy_clear(skb, true);
981 if (skip_unref)
982 goto free_head;
983 }
984
985 for (i = 0; i < shinfo->nr_frags; i++)
986 napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe);
987
988free_head:
989 if (shinfo->frag_list)
990 kfree_skb_list_reason(shinfo->frag_list, reason);
991
992 skb_free_head(skb, napi_safe);
993exit:
994 /* When we clone an SKB we copy the recycling bit. The pp_recycle
995 * bit is only set on the head though, so in order to avoid races
996 * while trying to recycle fragments on __skb_frag_unref() we need
997 * to make one SKB responsible for triggering the recycle path.
998 * So disable the recycling bit if an SKB is cloned and we have
999 * additional references to the fragmented part of the SKB.
1000 * Eventually the last SKB will have the recycling bit set and its
1001 * dataref set to 0, which will trigger the recycling.
1002 */
1003 skb->pp_recycle = 0;
1004}
1005
1006/*
1007 * Free an skbuff by memory without cleaning the state.
1008 */
1009static void kfree_skbmem(struct sk_buff *skb)
1010{
1011 struct sk_buff_fclones *fclones;
1012
1013 switch (skb->fclone) {
1014 case SKB_FCLONE_UNAVAILABLE:
1015 kmem_cache_free(skbuff_cache, skb);
1016 return;
1017
1018 case SKB_FCLONE_ORIG:
1019 fclones = container_of(skb, struct sk_buff_fclones, skb1);
1020
1021 /* We usually free the clone (TX completion) before the original skb.
1022 * This test would have no chance to be true for the clone,
1023 * while here, branch prediction will be good.
1024 */
1025 if (refcount_read(&fclones->fclone_ref) == 1)
1026 goto fastpath;
1027 break;
1028
1029 default: /* SKB_FCLONE_CLONE */
1030 fclones = container_of(skb, struct sk_buff_fclones, skb2);
1031 break;
1032 }
1033 if (!refcount_dec_and_test(&fclones->fclone_ref))
1034 return;
1035fastpath:
1036 kmem_cache_free(skbuff_fclone_cache, fclones);
1037}
1038
1039void skb_release_head_state(struct sk_buff *skb)
1040{
1041 skb_dst_drop(skb);
1042 if (skb->destructor) {
1043 DEBUG_NET_WARN_ON_ONCE(in_hardirq());
1044 skb->destructor(skb);
1045 }
1046#if IS_ENABLED(CONFIG_NF_CONNTRACK)
1047 nf_conntrack_put(skb_nfct(skb));
1048#endif
1049 skb_ext_put(skb);
1050}
1051
1052/* Free everything but the sk_buff shell. */
1053static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason,
1054 bool napi_safe)
1055{
1056 skb_release_head_state(skb);
1057 if (likely(skb->head))
1058 skb_release_data(skb, reason, napi_safe);
1059}
1060
1061/**
1062 * __kfree_skb - private function
1063 * @skb: buffer
1064 *
1065 * Free an sk_buff. Release anything attached to the buffer.
1066 * Clean the state. This is an internal helper function. Users should
1067 * always call kfree_skb
1068 */
1069
1070void __kfree_skb(struct sk_buff *skb)
1071{
1072 skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false);
1073 kfree_skbmem(skb);
1074}
1075EXPORT_SYMBOL(__kfree_skb);
1076
1077static __always_inline
1078bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
1079{
1080 if (unlikely(!skb_unref(skb)))
1081 return false;
1082
1083 DEBUG_NET_WARN_ON_ONCE(reason == SKB_NOT_DROPPED_YET ||
1084 u32_get_bits(reason,
1085 SKB_DROP_REASON_SUBSYS_MASK) >=
1086 SKB_DROP_REASON_SUBSYS_NUM);
1087
1088 if (reason == SKB_CONSUMED)
1089 trace_consume_skb(skb, __builtin_return_address(0));
1090 else
1091 trace_kfree_skb(skb, __builtin_return_address(0), reason);
1092 return true;
1093}
1094
1095/**
1096 * kfree_skb_reason - free an sk_buff with special reason
1097 * @skb: buffer to free
1098 * @reason: reason why this skb is dropped
1099 *
1100 * Drop a reference to the buffer and free it if the usage count has
1101 * hit zero. Meanwhile, pass the drop reason to the 'kfree_skb'
1102 * tracepoint.
1103 */
1104void __fix_address
1105kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason)
1106{
1107 if (__kfree_skb_reason(skb, reason))
1108 __kfree_skb(skb);
1109}
1110EXPORT_SYMBOL(kfree_skb_reason);
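/* Illustrative sketch (not part of the original file): dropping a packet
 * with an explicit reason so tools hooked on the kfree_skb tracepoint
 * (e.g. dropwatch, perf) can attribute the drop. The validity check and
 * the handler name are placeholders.
 */
#if 0
static int example_input(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, sizeof(struct udphdr))) {
		kfree_skb_reason(skb, SKB_DROP_REASON_PKT_TOO_SMALL);
		return 0;
	}
	/* ... normal processing ... */
	return 0;
}
#endif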
1111
1112#define KFREE_SKB_BULK_SIZE 16
1113
1114struct skb_free_array {
1115 unsigned int skb_count;
1116 void *skb_array[KFREE_SKB_BULK_SIZE];
1117};
1118
1119static void kfree_skb_add_bulk(struct sk_buff *skb,
1120 struct skb_free_array *sa,
1121 enum skb_drop_reason reason)
1122{
1123 /* if SKB is a clone, don't handle this case */
1124 if (unlikely(skb->fclone != SKB_FCLONE_UNAVAILABLE)) {
1125 __kfree_skb(skb);
1126 return;
1127 }
1128
1129 skb_release_all(skb, reason, false);
1130 sa->skb_array[sa->skb_count++] = skb;
1131
1132 if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) {
1133 kmem_cache_free_bulk(skbuff_cache, KFREE_SKB_BULK_SIZE,
1134 sa->skb_array);
1135 sa->skb_count = 0;
1136 }
1137}
1138
1139void __fix_address
1140kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason)
1141{
1142 struct skb_free_array sa;
1143
1144 sa.skb_count = 0;
1145
1146 while (segs) {
1147 struct sk_buff *next = segs->next;
1148
1149 if (__kfree_skb_reason(segs, reason)) {
1150 skb_poison_list(segs);
1151 kfree_skb_add_bulk(segs, &sa, reason);
1152 }
1153
1154 segs = next;
1155 }
1156
1157 if (sa.skb_count)
1158 kmem_cache_free_bulk(skbuff_cache, sa.skb_count, sa.skb_array);
1159}
1160EXPORT_SYMBOL(kfree_skb_list_reason);
1161
1162/* Dump skb information and contents.
1163 *
1164 * Must only be called from net_ratelimit()-ed paths.
1165 *
1166 * Dumps whole packets if full_pkt, only headers otherwise.
1167 */
1168void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
1169{
1170 struct skb_shared_info *sh = skb_shinfo(skb);
1171 struct net_device *dev = skb->dev;
1172 struct sock *sk = skb->sk;
1173 struct sk_buff *list_skb;
1174 bool has_mac, has_trans;
1175 int headroom, tailroom;
1176 int i, len, seg_len;
1177
1178 if (full_pkt)
1179 len = skb->len;
1180 else
1181 len = min_t(int, skb->len, MAX_HEADER + 128);
1182
1183 headroom = skb_headroom(skb);
1184 tailroom = skb_tailroom(skb);
1185
1186 has_mac = skb_mac_header_was_set(skb);
1187 has_trans = skb_transport_header_was_set(skb);
1188
1189 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
1190 "mac=(%d,%d) net=(%d,%d) trans=%d\n"
1191 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
1192 "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
1193 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
1194 level, skb->len, headroom, skb_headlen(skb), tailroom,
1195 has_mac ? skb->mac_header : -1,
1196 has_mac ? skb_mac_header_len(skb) : -1,
1197 skb->network_header,
1198 has_trans ? skb_network_header_len(skb) : -1,
1199 has_trans ? skb->transport_header : -1,
1200 sh->tx_flags, sh->nr_frags,
1201 sh->gso_size, sh->gso_type, sh->gso_segs,
1202 skb->csum, skb->ip_summed, skb->csum_complete_sw,
1203 skb->csum_valid, skb->csum_level,
1204 skb->hash, skb->sw_hash, skb->l4_hash,
1205 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
1206
1207 if (dev)
1208 printk("%sdev name=%s feat=%pNF\n",
1209 level, dev->name, &dev->features);
1210 if (sk)
1211 printk("%ssk family=%hu type=%u proto=%u\n",
1212 level, sk->sk_family, sk->sk_type, sk->sk_protocol);
1213
1214 if (full_pkt && headroom)
1215 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
1216 16, 1, skb->head, headroom, false);
1217
1218 seg_len = min_t(int, skb_headlen(skb), len);
1219 if (seg_len)
1220 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
1221 16, 1, skb->data, seg_len, false);
1222 len -= seg_len;
1223
1224 if (full_pkt && tailroom)
1225 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
1226 16, 1, skb_tail_pointer(skb), tailroom, false);
1227
1228 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
1229 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1230 u32 p_off, p_len, copied;
1231 struct page *p;
1232 u8 *vaddr;
1233
1234 skb_frag_foreach_page(frag, skb_frag_off(frag),
1235 skb_frag_size(frag), p, p_off, p_len,
1236 copied) {
1237 seg_len = min_t(int, p_len, len);
1238 vaddr = kmap_atomic(p);
1239 print_hex_dump(level, "skb frag: ",
1240 DUMP_PREFIX_OFFSET,
1241 16, 1, vaddr + p_off, seg_len, false);
1242 kunmap_atomic(vaddr);
1243 len -= seg_len;
1244 if (!len)
1245 break;
1246 }
1247 }
1248
1249 if (full_pkt && skb_has_frag_list(skb)) {
1250 printk("skb fraglist:\n");
1251 skb_walk_frags(skb, list_skb)
1252 skb_dump(level, list_skb, true);
1253 }
1254}
1255EXPORT_SYMBOL(skb_dump);
1256
1257/**
1258 * skb_tx_error - report an sk_buff xmit error
1259 * @skb: buffer that triggered an error
1260 *
1261 * Report xmit error if a device callback is tracking this skb.
1262 * skb must be freed afterwards.
1263 */
1264void skb_tx_error(struct sk_buff *skb)
1265{
1266 if (skb) {
1267 skb_zcopy_downgrade_managed(skb);
1268 skb_zcopy_clear(skb, true);
1269 }
1270}
1271EXPORT_SYMBOL(skb_tx_error);
1272
1273#ifdef CONFIG_TRACEPOINTS
1274/**
1275 * consume_skb - free an skbuff
1276 * @skb: buffer to free
1277 *
1278 * Drop a ref to the buffer and free it if the usage count has hit zero.
1279 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
1280 * is being dropped after a failure and notes that.
1281 */
1282void consume_skb(struct sk_buff *skb)
1283{
1284 if (!skb_unref(skb))
1285 return;
1286
1287 trace_consume_skb(skb, __builtin_return_address(0));
1288 __kfree_skb(skb);
1289}
1290EXPORT_SYMBOL(consume_skb);
1291#endif
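/* Illustrative sketch (not part of the original file): the consume vs. drop
 * distinction as seen from a driver TX completion handler, per the
 * consume_skb() note above. The dev_*_skb_any() variants are used so the
 * sketch is safe from any context; the handler name is hypothetical.
 */
#if 0
static void example_tx_complete(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		dev_consume_skb_any(skb);	/* successful transmit, not a drop */
	else
		dev_kfree_skb_any(skb);		/* error path, counts as a drop */
}
#endif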
1292
1293/**
1294 * __consume_stateless_skb - free an skbuff, assuming it is stateless
1295 * @skb: buffer to free
1296 *
1297 * Like consume_skb(), but this variant assumes that this is the last
1298 * skb reference and all the head states have already been dropped.
1299 */
1300void __consume_stateless_skb(struct sk_buff *skb)
1301{
1302 trace_consume_skb(skb, __builtin_return_address(0));
1303 skb_release_data(skb, SKB_CONSUMED, false);
1304 kfree_skbmem(skb);
1305}
1306
1307static void napi_skb_cache_put(struct sk_buff *skb)
1308{
1309 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
1310 u32 i;
1311
1312 kasan_poison_object_data(skbuff_cache, skb);
1313 nc->skb_cache[nc->skb_count++] = skb;
1314
1315 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
1316 for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
1317 kasan_unpoison_object_data(skbuff_cache,
1318 nc->skb_cache[i]);
1319
1320 kmem_cache_free_bulk(skbuff_cache, NAPI_SKB_CACHE_HALF,
1321 nc->skb_cache + NAPI_SKB_CACHE_HALF);
1322 nc->skb_count = NAPI_SKB_CACHE_HALF;
1323 }
1324}
1325
1326void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason)
1327{
1328 skb_release_all(skb, reason, true);
1329 napi_skb_cache_put(skb);
1330}
1331
1332void napi_skb_free_stolen_head(struct sk_buff *skb)
1333{
1334 if (unlikely(skb->slow_gro)) {
1335 nf_reset_ct(skb);
1336 skb_dst_drop(skb);
1337 skb_ext_put(skb);
1338 skb_orphan(skb);
1339 skb->slow_gro = 0;
1340 }
1341 napi_skb_cache_put(skb);
1342}
1343
1344void napi_consume_skb(struct sk_buff *skb, int budget)
1345{
1346 /* Zero budget indicates a non-NAPI context called us, like netpoll */
1347 if (unlikely(!budget)) {
1348 dev_consume_skb_any(skb);
1349 return;
1350 }
1351
1352 DEBUG_NET_WARN_ON_ONCE(!in_softirq());
1353
1354 if (!skb_unref(skb))
1355 return;
1356
1357 /* if reaching here SKB is ready to free */
1358 trace_consume_skb(skb, __builtin_return_address(0));
1359
1360 /* if SKB is a clone, don't handle this case */
1361 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
1362 __kfree_skb(skb);
1363 return;
1364 }
1365
1366 skb_release_all(skb, SKB_CONSUMED, !!budget);
1367 napi_skb_cache_put(skb);
1368}
1369EXPORT_SYMBOL(napi_consume_skb);
1370
1371/* Make sure a field is contained in the headers group */
1372#define CHECK_SKB_FIELD(field) \
1373 BUILD_BUG_ON(offsetof(struct sk_buff, field) != \
1374 offsetof(struct sk_buff, headers.field)); \
1375
1376static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1377{
1378 new->tstamp = old->tstamp;
1379 /* We do not copy old->sk */
1380 new->dev = old->dev;
1381 memcpy(new->cb, old->cb, sizeof(old->cb));
1382 skb_dst_copy(new, old);
1383 __skb_ext_copy(new, old);
1384 __nf_copy(new, old, false);
1385
1386 /* Note: this field could be in the headers group.
1387 * It is not yet, because we do not want to have a 16 bit hole.
1388 */
1389 new->queue_mapping = old->queue_mapping;
1390
1391 memcpy(&new->headers, &old->headers, sizeof(new->headers));
1392 CHECK_SKB_FIELD(protocol);
1393 CHECK_SKB_FIELD(csum);
1394 CHECK_SKB_FIELD(hash);
1395 CHECK_SKB_FIELD(priority);
1396 CHECK_SKB_FIELD(skb_iif);
1397 CHECK_SKB_FIELD(vlan_proto);
1398 CHECK_SKB_FIELD(vlan_tci);
1399 CHECK_SKB_FIELD(transport_header);
1400 CHECK_SKB_FIELD(network_header);
1401 CHECK_SKB_FIELD(mac_header);
1402 CHECK_SKB_FIELD(inner_protocol);
1403 CHECK_SKB_FIELD(inner_transport_header);
1404 CHECK_SKB_FIELD(inner_network_header);
1405 CHECK_SKB_FIELD(inner_mac_header);
1406 CHECK_SKB_FIELD(mark);
1407#ifdef CONFIG_NETWORK_SECMARK
1408 CHECK_SKB_FIELD(secmark);
1409#endif
1410#ifdef CONFIG_NET_RX_BUSY_POLL
1411 CHECK_SKB_FIELD(napi_id);
1412#endif
1413 CHECK_SKB_FIELD(alloc_cpu);
1414#ifdef CONFIG_XPS
1415 CHECK_SKB_FIELD(sender_cpu);
1416#endif
1417#ifdef CONFIG_NET_SCHED
1418 CHECK_SKB_FIELD(tc_index);
1419#endif
1420
1421}
1422
1423/*
1424 * You should not add any new code to this function. Add it to
1425 * __copy_skb_header above instead.
1426 */
1427static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
1428{
1429#define C(x) n->x = skb->x
1430
1431 n->next = n->prev = NULL;
1432 n->sk = NULL;
1433 __copy_skb_header(n, skb);
1434
1435 C(len);
1436 C(data_len);
1437 C(mac_len);
1438 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
1439 n->cloned = 1;
1440 n->nohdr = 0;
1441 n->peeked = 0;
1442 C(pfmemalloc);
1443 C(pp_recycle);
1444 n->destructor = NULL;
1445 C(tail);
1446 C(end);
1447 C(head);
1448 C(head_frag);
1449 C(data);
1450 C(truesize);
1451 refcount_set(&n->users, 1);
1452
1453 atomic_inc(&(skb_shinfo(skb)->dataref));
1454 skb->cloned = 1;
1455
1456 return n;
1457#undef C
1458}
1459
1460/**
1461 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1462 * @first: first sk_buff of the msg
1463 */
1464struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
1465{
1466 struct sk_buff *n;
1467
1468 n = alloc_skb(0, GFP_ATOMIC);
1469 if (!n)
1470 return NULL;
1471
1472 n->len = first->len;
1473 n->data_len = first->len;
1474 n->truesize = first->truesize;
1475
1476 skb_shinfo(n)->frag_list = first;
1477
1478 __copy_skb_header(n, first);
1479 n->destructor = NULL;
1480
1481 return n;
1482}
1483EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
1484
1485/**
1486 * skb_morph - morph one skb into another
1487 * @dst: the skb to receive the contents
1488 * @src: the skb to supply the contents
1489 *
1490 * This is identical to skb_clone except that the target skb is
1491 * supplied by the user.
1492 *
1493 * The target skb is returned upon exit.
1494 */
1495struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
1496{
1497 skb_release_all(dst, SKB_CONSUMED, false);
1498 return __skb_clone(dst, src);
1499}
1500EXPORT_SYMBOL_GPL(skb_morph);
1501
1502int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
1503{
1504 unsigned long max_pg, num_pg, new_pg, old_pg, rlim;
1505 struct user_struct *user;
1506
1507 if (capable(CAP_IPC_LOCK) || !size)
1508 return 0;
1509
1510 rlim = rlimit(RLIMIT_MEMLOCK);
1511 if (rlim == RLIM_INFINITY)
1512 return 0;
1513
1514 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
1515 max_pg = rlim >> PAGE_SHIFT;
1516 user = mmp->user ? : current_user();
1517
1518 old_pg = atomic_long_read(&user->locked_vm);
1519 do {
1520 new_pg = old_pg + num_pg;
1521 if (new_pg > max_pg)
1522 return -ENOBUFS;
1523 } while (!atomic_long_try_cmpxchg(&user->locked_vm, &old_pg, new_pg));
1524
1525 if (!mmp->user) {
1526 mmp->user = get_uid(user);
1527 mmp->num_pg = num_pg;
1528 } else {
1529 mmp->num_pg += num_pg;
1530 }
1531
1532 return 0;
1533}
1534EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
1535
1536void mm_unaccount_pinned_pages(struct mmpin *mmp)
1537{
1538 if (mmp->user) {
1539 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
1540 free_uid(mmp->user);
1541 }
1542}
1543EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
1544
1545static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
1546{
1547 struct ubuf_info_msgzc *uarg;
1548 struct sk_buff *skb;
1549
1550 WARN_ON_ONCE(!in_task());
1551
1552 skb = sock_omalloc(sk, 0, GFP_KERNEL);
1553 if (!skb)
1554 return NULL;
1555
1556 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
1557 uarg = (void *)skb->cb;
1558 uarg->mmp.user = NULL;
1559
1560 if (mm_account_pinned_pages(&uarg->mmp, size)) {
1561 kfree_skb(skb);
1562 return NULL;
1563 }
1564
1565 uarg->ubuf.callback = msg_zerocopy_callback;
1566 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
1567 uarg->len = 1;
1568 uarg->bytelen = size;
1569 uarg->zerocopy = 1;
1570 uarg->ubuf.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
1571 refcount_set(&uarg->ubuf.refcnt, 1);
1572 sock_hold(sk);
1573
1574 return &uarg->ubuf;
1575}
1576
1577static inline struct sk_buff *skb_from_uarg(struct ubuf_info_msgzc *uarg)
1578{
1579 return container_of((void *)uarg, struct sk_buff, cb);
1580}
1581
1582struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
1583 struct ubuf_info *uarg)
1584{
1585 if (uarg) {
1586 struct ubuf_info_msgzc *uarg_zc;
1587 const u32 byte_limit = 1 << 19; /* limit to a few TSO */
1588 u32 bytelen, next;
1589
1590 /* there might be non MSG_ZEROCOPY users */
1591 if (uarg->callback != msg_zerocopy_callback)
1592 return NULL;
1593
1594 /* realloc only when socket is locked (TCP, UDP cork),
1595 * so uarg->len and sk_zckey access is serialized
1596 */
1597 if (!sock_owned_by_user(sk)) {
1598 WARN_ON_ONCE(1);
1599 return NULL;
1600 }
1601
1602 uarg_zc = uarg_to_msgzc(uarg);
1603 bytelen = uarg_zc->bytelen + size;
1604 if (uarg_zc->len == USHRT_MAX - 1 || bytelen > byte_limit) {
1605 /* TCP can create new skb to attach new uarg */
1606 if (sk->sk_type == SOCK_STREAM)
1607 goto new_alloc;
1608 return NULL;
1609 }
1610
1611 next = (u32)atomic_read(&sk->sk_zckey);
1612 if ((u32)(uarg_zc->id + uarg_zc->len) == next) {
1613 if (mm_account_pinned_pages(&uarg_zc->mmp, size))
1614 return NULL;
1615 uarg_zc->len++;
1616 uarg_zc->bytelen = bytelen;
1617 atomic_set(&sk->sk_zckey, ++next);
1618
1619 /* no extra ref when appending to datagram (MSG_MORE) */
1620 if (sk->sk_type == SOCK_STREAM)
1621 net_zcopy_get(uarg);
1622
1623 return uarg;
1624 }
1625 }
1626
1627new_alloc:
1628 return msg_zerocopy_alloc(sk, size);
1629}
1630EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);
1631
1632static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1633{
1634 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1635 u32 old_lo, old_hi;
1636 u64 sum_len;
1637
1638 old_lo = serr->ee.ee_info;
1639 old_hi = serr->ee.ee_data;
1640 sum_len = old_hi - old_lo + 1ULL + len;
1641
1642 if (sum_len >= (1ULL << 32))
1643 return false;
1644
1645 if (lo != old_hi + 1)
1646 return false;
1647
1648 serr->ee.ee_data += len;
1649 return true;
1650}
1651
1652static void __msg_zerocopy_callback(struct ubuf_info_msgzc *uarg)
1653{
1654 struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1655 struct sock_exterr_skb *serr;
1656 struct sock *sk = skb->sk;
1657 struct sk_buff_head *q;
1658 unsigned long flags;
1659 bool is_zerocopy;
1660 u32 lo, hi;
1661 u16 len;
1662
1663 mm_unaccount_pinned_pages(&uarg->mmp);
1664
1665 /* if !len, there was only 1 call, and it was aborted
1666 * so do not queue a completion notification
1667 */
1668 if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1669 goto release;
1670
1671 len = uarg->len;
1672 lo = uarg->id;
1673 hi = uarg->id + len - 1;
1674 is_zerocopy = uarg->zerocopy;
1675
1676 serr = SKB_EXT_ERR(skb);
1677 memset(serr, 0, sizeof(*serr));
1678 serr->ee.ee_errno = 0;
1679 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1680 serr->ee.ee_data = hi;
1681 serr->ee.ee_info = lo;
1682 if (!is_zerocopy)
1683 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1684
1685 q = &sk->sk_error_queue;
1686 spin_lock_irqsave(&q->lock, flags);
1687 tail = skb_peek_tail(q);
1688 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1689 !skb_zerocopy_notify_extend(tail, lo, len)) {
1690 __skb_queue_tail(q, skb);
1691 skb = NULL;
1692 }
1693 spin_unlock_irqrestore(&q->lock, flags);
1694
1695 sk_error_report(sk);
1696
1697release:
1698 consume_skb(skb);
1699 sock_put(sk);
1700}
1701
1702void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
1703 bool success)
1704{
1705 struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg);
1706
1707 uarg_zc->zerocopy = uarg_zc->zerocopy & success;
1708
1709 if (refcount_dec_and_test(&uarg->refcnt))
1710 __msg_zerocopy_callback(uarg_zc);
1711}
1712EXPORT_SYMBOL_GPL(msg_zerocopy_callback);
1713
1714void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1715{
1716 struct sock *sk = skb_from_uarg(uarg_to_msgzc(uarg))->sk;
1717
1718 atomic_dec(&sk->sk_zckey);
1719 uarg_to_msgzc(uarg)->len--;
1720
1721 if (have_uref)
1722 msg_zerocopy_callback(NULL, uarg, true);
1723}
1724EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
1725
1726int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1727 struct msghdr *msg, int len,
1728 struct ubuf_info *uarg)
1729{
1730 struct ubuf_info *orig_uarg = skb_zcopy(skb);
1731 int err, orig_len = skb->len;
1732
1733 /* An skb can only point to one uarg. This edge case happens when
1734 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1735 */
1736 if (orig_uarg && uarg != orig_uarg)
1737 return -EEXIST;
1738
1739 err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len);
1740 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1741 struct sock *save_sk = skb->sk;
1742
1743 /* Streams do not free skb on error. Reset to prev state. */
1744 iov_iter_revert(&msg->msg_iter, skb->len - orig_len);
1745 skb->sk = sk;
1746 ___pskb_trim(skb, orig_len);
1747 skb->sk = save_sk;
1748 return err;
1749 }
1750
1751 skb_zcopy_set(skb, uarg, NULL);
1752 return skb->len - orig_len;
1753}
1754EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
1755
1756void __skb_zcopy_downgrade_managed(struct sk_buff *skb)
1757{
1758 int i;
1759
1760 skb_shinfo(skb)->flags &= ~SKBFL_MANAGED_FRAG_REFS;
1761 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1762 skb_frag_ref(skb, i);
1763}
1764EXPORT_SYMBOL_GPL(__skb_zcopy_downgrade_managed);
1765
1766static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1767 gfp_t gfp_mask)
1768{
1769 if (skb_zcopy(orig)) {
1770 if (skb_zcopy(nskb)) {
1771 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1772 if (!gfp_mask) {
1773 WARN_ON_ONCE(1);
1774 return -ENOMEM;
1775 }
1776 if (skb_uarg(nskb) == skb_uarg(orig))
1777 return 0;
1778 if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1779 return -EIO;
1780 }
1781 skb_zcopy_set(nskb, skb_uarg(orig), NULL);
1782 }
1783 return 0;
1784}
1785
1786/**
1787 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1788 * @skb: the skb to modify
1789 * @gfp_mask: allocation priority
1790 *
1791 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
1792 * It will copy all frags into kernel and drop the reference
1793 * to userspace pages.
1794 *
1795 * If this function is called from an interrupt, @gfp_mask must be
1796 * %GFP_ATOMIC.
1797 *
1798 * Returns 0 on success or a negative error code on failure
1799 * to allocate kernel memory to copy to.
1800 */
1801int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1802{
1803 int num_frags = skb_shinfo(skb)->nr_frags;
1804 struct page *page, *head = NULL;
1805 int i, order, psize, new_frags;
1806 u32 d_off;
1807
1808 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1809 return -EINVAL;
1810
1811 if (!num_frags)
1812 goto release;
1813
1814 /* We might have to allocate high order pages, so compute what minimum
1815 * page order is needed.
1816 */
1817 order = 0;
1818 while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
1819 order++;
1820 psize = (PAGE_SIZE << order);
1821
1822 new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
1823 for (i = 0; i < new_frags; i++) {
1824 page = alloc_pages(gfp_mask | __GFP_COMP, order);
1825 if (!page) {
1826 while (head) {
1827 struct page *next = (struct page *)page_private(head);
1828 put_page(head);
1829 head = next;
1830 }
1831 return -ENOMEM;
1832 }
1833 set_page_private(page, (unsigned long)head);
1834 head = page;
1835 }
1836
1837 page = head;
1838 d_off = 0;
1839 for (i = 0; i < num_frags; i++) {
1840 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1841 u32 p_off, p_len, copied;
1842 struct page *p;
1843 u8 *vaddr;
1844
1845 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
1846 p, p_off, p_len, copied) {
1847 u32 copy, done = 0;
1848 vaddr = kmap_atomic(p);
1849
1850 while (done < p_len) {
1851 if (d_off == psize) {
1852 d_off = 0;
1853 page = (struct page *)page_private(page);
1854 }
1855 copy = min_t(u32, psize - d_off, p_len - done);
1856 memcpy(page_address(page) + d_off,
1857 vaddr + p_off + done, copy);
1858 done += copy;
1859 d_off += copy;
1860 }
1861 kunmap_atomic(vaddr);
1862 }
1863 }
1864
1865 /* skb frags release userspace buffers */
1866 for (i = 0; i < num_frags; i++)
1867 skb_frag_unref(skb, i);
1868
1869 /* skb frags point to kernel buffers */
1870 for (i = 0; i < new_frags - 1; i++) {
1871 __skb_fill_page_desc(skb, i, head, 0, psize);
1872 head = (struct page *)page_private(head);
1873 }
1874 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1875 skb_shinfo(skb)->nr_frags = new_frags;
1876
1877release:
1878 skb_zcopy_clear(skb, false);
1879 return 0;
1880}
1881EXPORT_SYMBOL_GPL(skb_copy_ubufs);
1882
1883/**
1884 * skb_clone - duplicate an sk_buff
1885 * @skb: buffer to clone
1886 * @gfp_mask: allocation priority
1887 *
1888 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
1889 * copies share the same packet data but not structure. The new
1890 * buffer has a reference count of 1. If the allocation fails the
1891 * function returns %NULL otherwise the new buffer is returned.
1892 *
1893 * If this function is called from an interrupt, @gfp_mask must be
1894 * %GFP_ATOMIC.
1895 */
1896
1897struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1898{
1899 struct sk_buff_fclones *fclones = container_of(skb,
1900 struct sk_buff_fclones,
1901 skb1);
1902 struct sk_buff *n;
1903
1904 if (skb_orphan_frags(skb, gfp_mask))
1905 return NULL;
1906
1907 if (skb->fclone == SKB_FCLONE_ORIG &&
1908 refcount_read(&fclones->fclone_ref) == 1) {
1909 n = &fclones->skb2;
1910 refcount_set(&fclones->fclone_ref, 2);
1911 n->fclone = SKB_FCLONE_CLONE;
1912 } else {
1913 if (skb_pfmemalloc(skb))
1914 gfp_mask |= __GFP_MEMALLOC;
1915
1916 n = kmem_cache_alloc(skbuff_cache, gfp_mask);
1917 if (!n)
1918 return NULL;
1919
1920 n->fclone = SKB_FCLONE_UNAVAILABLE;
1921 }
1922
1923 return __skb_clone(n, skb);
1924}
1925EXPORT_SYMBOL(skb_clone);
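
/* Usage sketch: the common clone-for-tap pattern this helper enables.
 * deliver_to_tap() is a hypothetical consumer; because the clone shares the
 * packet data with @skb, the consumer must treat that data as read-only.
 */
static inline void skb_clone_example(struct sk_buff *skb,
				     void (*deliver_to_tap)(struct sk_buff *))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (clone)
		deliver_to_tap(clone);	/* the consumer now owns the clone */
	/* the original skb continues along its normal path unchanged */
}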
1926
1927void skb_headers_offset_update(struct sk_buff *skb, int off)
1928{
1929 /* Only adjust this if it actually is csum_start rather than csum */
1930 if (skb->ip_summed == CHECKSUM_PARTIAL)
1931 skb->csum_start += off;
1932 /* {transport,network,mac}_header and tail are relative to skb->head */
1933 skb->transport_header += off;
1934 skb->network_header += off;
1935 if (skb_mac_header_was_set(skb))
1936 skb->mac_header += off;
1937 skb->inner_transport_header += off;
1938 skb->inner_network_header += off;
1939 skb->inner_mac_header += off;
1940}
1941EXPORT_SYMBOL(skb_headers_offset_update);
1942
1943void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
1944{
1945 __copy_skb_header(new, old);
1946
1947 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1948 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1949 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1950}
1951EXPORT_SYMBOL(skb_copy_header);
1952
1953static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1954{
1955 if (skb_pfmemalloc(skb))
1956 return SKB_ALLOC_RX;
1957 return 0;
1958}
1959
1960/**
1961 * skb_copy - create private copy of an sk_buff
1962 * @skb: buffer to copy
1963 * @gfp_mask: allocation priority
1964 *
1965 * Make a copy of both an &sk_buff and its data. This is used when the
1966 * caller wishes to modify the data and needs a private copy of the
1967 * data to alter. Returns %NULL on failure or the pointer to the buffer
1968 * on success. The returned buffer has a reference count of 1.
1969 *
1970 * As a by-product, this function converts a non-linear &sk_buff into a
1971 * linear one, so that the &sk_buff becomes completely private and the
1972 * caller is allowed to modify all the data of the returned buffer. This
1973 * means the function is not recommended when only the
1974 * header is going to be modified. Use pskb_copy() instead.
1975 */
1976
1977struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1978{
1979 int headerlen = skb_headroom(skb);
1980 unsigned int size = skb_end_offset(skb) + skb->data_len;
1981 struct sk_buff *n = __alloc_skb(size, gfp_mask,
1982 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1983
1984 if (!n)
1985 return NULL;
1986
1987 /* Set the data pointer */
1988 skb_reserve(n, headerlen);
1989 /* Set the tail pointer and length */
1990 skb_put(n, skb->len);
1991
1992 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1993
1994 skb_copy_header(n, skb);
1995 return n;
1996}
1997EXPORT_SYMBOL(skb_copy);
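
/* Usage sketch: taking a fully private, linear copy before rewriting payload
 * bytes of a possibly cloned skb. The byte flip is a hypothetical edit.
 */
static inline struct sk_buff *skb_copy_example(const struct sk_buff *skb)
{
	struct sk_buff *priv = skb_copy(skb, GFP_ATOMIC);

	if (!priv)
		return NULL;
	if (priv->len)
		priv->data[0] ^= 0xff;	/* safe: priv owns both head and data */
	return priv;
}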
1998
1999/**
2000 * __pskb_copy_fclone - create copy of an sk_buff with private head.
2001 * @skb: buffer to copy
2002 * @headroom: headroom of new skb
2003 * @gfp_mask: allocation priority
2004 * @fclone: if true allocate the copy of the skb from the fclone
2005 * cache instead of the head cache; it is recommended to set this
2006 * to true for the cases where the copy will likely be cloned
2007 *
2008 * Make a copy of both an &sk_buff and part of its data, located
2009 * in the header. Fragmented data remain shared. This is used when
2010 * the caller wishes to modify only the header of the &sk_buff and needs
2011 * a private copy of the header to alter. Returns %NULL on failure
2012 * or the pointer to the buffer on success.
2013 * The returned buffer has a reference count of 1.
2014 */
2015
2016struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
2017 gfp_t gfp_mask, bool fclone)
2018{
2019 unsigned int size = skb_headlen(skb) + headroom;
2020 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
2021 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
2022
2023 if (!n)
2024 goto out;
2025
2026 /* Set the data pointer */
2027 skb_reserve(n, headroom);
2028 /* Set the tail pointer and length */
2029 skb_put(n, skb_headlen(skb));
2030 /* Copy the bytes */
2031 skb_copy_from_linear_data(skb, n->data, n->len);
2032
2033 n->truesize += skb->data_len;
2034 n->data_len = skb->data_len;
2035 n->len = skb->len;
2036
2037 if (skb_shinfo(skb)->nr_frags) {
2038 int i;
2039
2040 if (skb_orphan_frags(skb, gfp_mask) ||
2041 skb_zerocopy_clone(n, skb, gfp_mask)) {
2042 kfree_skb(n);
2043 n = NULL;
2044 goto out;
2045 }
2046 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2047 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
2048 skb_frag_ref(skb, i);
2049 }
2050 skb_shinfo(n)->nr_frags = i;
2051 }
2052
2053 if (skb_has_frag_list(skb)) {
2054 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
2055 skb_clone_fraglist(n);
2056 }
2057
2058 skb_copy_header(n, skb);
2059out:
2060 return n;
2061}
2062EXPORT_SYMBOL(__pskb_copy_fclone);
2063
2064/**
2065 * pskb_expand_head - reallocate header of &sk_buff
2066 * @skb: buffer to reallocate
2067 * @nhead: room to add at head
2068 * @ntail: room to add at tail
2069 * @gfp_mask: allocation priority
2070 *
2071 * Expands (or creates an identical copy, if @nhead and @ntail are zero)
2072 * the header of @skb. The &sk_buff itself is not changed. The &sk_buff
2073 * MUST have a reference count of 1. Returns zero on success or a negative
2074 * error code if expansion failed; in that case the &sk_buff is not changed.
2075 *
2076 * All the pointers pointing into skb header may change and must be
2077 * reloaded after call to this function.
2078 */
2079
2080int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
2081 gfp_t gfp_mask)
2082{
2083 unsigned int osize = skb_end_offset(skb);
2084 unsigned int size = osize + nhead + ntail;
2085 long off;
2086 u8 *data;
2087 int i;
2088
2089 BUG_ON(nhead < 0);
2090
2091 BUG_ON(skb_shared(skb));
2092
2093 skb_zcopy_downgrade_managed(skb);
2094
2095 if (skb_pfmemalloc(skb))
2096 gfp_mask |= __GFP_MEMALLOC;
2097
2098 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
2099 if (!data)
2100 goto nodata;
2101 size = SKB_WITH_OVERHEAD(size);
2102
2103 /* Copy only real data... and, alas, header. This should be
2104 * optimized for the cases when header is void.
2105 */
2106 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
2107
2108 memcpy((struct skb_shared_info *)(data + size),
2109 skb_shinfo(skb),
2110 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
2111
2112 /*
2113 * if shinfo is shared we must drop the old head gracefully, but if it
2114 * is not we can just drop the old head and let the existing refcount
2115 * be since all we did is relocate the values
2116 */
2117 if (skb_cloned(skb)) {
2118 if (skb_orphan_frags(skb, gfp_mask))
2119 goto nofrags;
2120 if (skb_zcopy(skb))
2121 refcount_inc(&skb_uarg(skb)->refcnt);
2122 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2123 skb_frag_ref(skb, i);
2124
2125 if (skb_has_frag_list(skb))
2126 skb_clone_fraglist(skb);
2127
2128 skb_release_data(skb, SKB_CONSUMED, false);
2129 } else {
2130 skb_free_head(skb, false);
2131 }
2132 off = (data + nhead) - skb->head;
2133
2134 skb->head = data;
2135 skb->head_frag = 0;
2136 skb->data += off;
2137
2138 skb_set_end_offset(skb, size);
2139#ifdef NET_SKBUFF_DATA_USES_OFFSET
2140 off = nhead;
2141#endif
2142 skb->tail += off;
2143 skb_headers_offset_update(skb, nhead);
2144 skb->cloned = 0;
2145 skb->hdr_len = 0;
2146 skb->nohdr = 0;
2147 atomic_set(&skb_shinfo(skb)->dataref, 1);
2148
2149 skb_metadata_clear(skb);
2150
2151 /* It is not generally safe to change skb->truesize.
2152 * For the moment, we only really care about the rx path, or
2153 * about skbs that are orphaned (not attached to a socket).
2154 */
2155 if (!skb->sk || skb->destructor == sock_edemux)
2156 skb->truesize += size - osize;
2157
2158 return 0;
2159
2160nofrags:
2161 skb_kfree_head(data, size);
2162nodata:
2163 return -ENOMEM;
2164}
2165EXPORT_SYMBOL(pskb_expand_head);
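
/* Usage sketch: making room for a hypothetical 8-byte outer header before
 * pushing it. In real callers this check-and-expand step is usually done via
 * skb_cow_head(); any cached pointers into the old header must be re-derived
 * after the call, as documented above.
 */
static inline int pskb_expand_head_example(struct sk_buff *skb)
{
	const unsigned int needed = 8;

	if (skb_headroom(skb) < needed || skb_cloned(skb)) {
		int err = pskb_expand_head(skb, SKB_DATA_ALIGN(needed), 0,
					   GFP_ATOMIC);

		if (err)
			return err;
	}
	memset(skb_push(skb, needed), 0, needed);	/* new outer header */
	return 0;
}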
2166
2167/* Make private copy of skb with writable head and some headroom */
2168
2169struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
2170{
2171 struct sk_buff *skb2;
2172 int delta = headroom - skb_headroom(skb);
2173
2174 if (delta <= 0)
2175 skb2 = pskb_copy(skb, GFP_ATOMIC);
2176 else {
2177 skb2 = skb_clone(skb, GFP_ATOMIC);
2178 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
2179 GFP_ATOMIC)) {
2180 kfree_skb(skb2);
2181 skb2 = NULL;
2182 }
2183 }
2184 return skb2;
2185}
2186EXPORT_SYMBOL(skb_realloc_headroom);
2187
2188/* Note: We plan to rework this in linux-6.4 */
2189int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
2190{
2191 unsigned int saved_end_offset, saved_truesize;
2192 struct skb_shared_info *shinfo;
2193 int res;
2194
2195 saved_end_offset = skb_end_offset(skb);
2196 saved_truesize = skb->truesize;
2197
2198 res = pskb_expand_head(skb, 0, 0, pri);
2199 if (res)
2200 return res;
2201
2202 skb->truesize = saved_truesize;
2203
2204 if (likely(skb_end_offset(skb) == saved_end_offset))
2205 return 0;
2206
2207 /* We cannot change skb->end if the original or new value
2208 * is SKB_SMALL_HEAD_HEADROOM, as it might break skb_kfree_head().
2209 */
2210 if (saved_end_offset == SKB_SMALL_HEAD_HEADROOM ||
2211 skb_end_offset(skb) == SKB_SMALL_HEAD_HEADROOM) {
2212 /* We think this path should not be taken.
2213 * Add a temporary trace to warn us just in case.
2214 */
2215 pr_err_once("__skb_unclone_keeptruesize() skb_end_offset() %u -> %u\n",
2216 saved_end_offset, skb_end_offset(skb));
2217 WARN_ON_ONCE(1);
2218 return 0;
2219 }
2220
2221 shinfo = skb_shinfo(skb);
2222
2223 /* We are about to change back skb->end,
2224 * we need to move skb_shinfo() to its new location.
2225 */
2226 memmove(skb->head + saved_end_offset,
2227 shinfo,
2228 offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));
2229
2230 skb_set_end_offset(skb, saved_end_offset);
2231
2232 return 0;
2233}
2234
2235/**
2236 * skb_expand_head - reallocate header of &sk_buff
2237 * @skb: buffer to reallocate
2238 * @headroom: needed headroom
2239 *
2240 * Unlike skb_realloc_headroom, this one does not allocate a new skb
2241 * if possible; it copies skb->sk to the new skb as needed
2242 * and frees the original skb in case of failure.
2243 *
2244 * It expects an increased headroom and generates a warning otherwise.
2245 */
2246
2247struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom)
2248{
2249 int delta = headroom - skb_headroom(skb);
2250 int osize = skb_end_offset(skb);
2251 struct sock *sk = skb->sk;
2252
2253 if (WARN_ONCE(delta <= 0,
2254 "%s is expecting an increase in the headroom", __func__))
2255 return skb;
2256
2257 delta = SKB_DATA_ALIGN(delta);
2258 /* pskb_expand_head() might crash if skb is shared. */
2259 if (skb_shared(skb) || !is_skb_wmem(skb)) {
2260 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2261
2262 if (unlikely(!nskb))
2263 goto fail;
2264
2265 if (sk)
2266 skb_set_owner_w(nskb, sk);
2267 consume_skb(skb);
2268 skb = nskb;
2269 }
2270 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC))
2271 goto fail;
2272
2273 if (sk && is_skb_wmem(skb)) {
2274 delta = skb_end_offset(skb) - osize;
2275 refcount_add(delta, &sk->sk_wmem_alloc);
2276 skb->truesize += delta;
2277 }
2278 return skb;
2279
2280fail:
2281 kfree_skb(skb);
2282 return NULL;
2283}
2284EXPORT_SYMBOL(skb_expand_head);
2285
2286/**
2287 * skb_copy_expand - copy and expand sk_buff
2288 * @skb: buffer to copy
2289 * @newheadroom: new free bytes at head
2290 * @newtailroom: new free bytes at tail
2291 * @gfp_mask: allocation priority
2292 *
2293 * Make a copy of both an &sk_buff and its data and while doing so
2294 * allocate additional space.
2295 *
2296 * This is used when the caller wishes to modify the data and needs a
2297 * private copy of the data to alter as well as more space for new fields.
2298 * Returns %NULL on failure or the pointer to the buffer
2299 * on success. The returned buffer has a reference count of 1.
2300 *
2301 * You must pass %GFP_ATOMIC as the allocation priority if this function
2302 * is called from an interrupt.
2303 */
2304struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
2305 int newheadroom, int newtailroom,
2306 gfp_t gfp_mask)
2307{
2308 /*
2309 * Allocate the copy buffer
2310 */
2311 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
2312 gfp_mask, skb_alloc_rx_flag(skb),
2313 NUMA_NO_NODE);
2314 int oldheadroom = skb_headroom(skb);
2315 int head_copy_len, head_copy_off;
2316
2317 if (!n)
2318 return NULL;
2319
2320 skb_reserve(n, newheadroom);
2321
2322 /* Set the tail pointer and length */
2323 skb_put(n, skb->len);
2324
2325 head_copy_len = oldheadroom;
2326 head_copy_off = 0;
2327 if (newheadroom <= head_copy_len)
2328 head_copy_len = newheadroom;
2329 else
2330 head_copy_off = newheadroom - head_copy_len;
2331
2332 /* Copy the linear header and data. */
2333 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
2334 skb->len + head_copy_len));
2335
2336 skb_copy_header(n, skb);
2337
2338 skb_headers_offset_update(n, newheadroom - oldheadroom);
2339
2340 return n;
2341}
2342EXPORT_SYMBOL(skb_copy_expand);
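
/* Usage sketch: copying a packet while reserving room for a hypothetical
 * 16-byte encapsulation header and a 4-byte trailer around the existing data.
 */
static inline struct sk_buff *skb_copy_expand_example(const struct sk_buff *skb)
{
	struct sk_buff *n = skb_copy_expand(skb, 16, 4, GFP_ATOMIC);

	if (!n)
		return NULL;
	memset(skb_push(n, 16), 0, 16);	/* fill in the new outer header */
	memset(skb_put(n, 4), 0, 4);	/* fill in the new trailer */
	return n;
}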
2343
2344/**
2345 * __skb_pad - zero pad the tail of an skb
2346 * @skb: buffer to pad
2347 * @pad: space to pad
2348 * @free_on_error: free buffer on error
2349 *
2350 * Ensure that a buffer is followed by a padding area that is zero
2351 * filled. Used by network drivers which may DMA or transfer data
2352 * beyond the buffer end onto the wire.
2353 *
2354 * May return an error in out-of-memory cases. The skb is freed on error
2355 * if @free_on_error is true.
2356 */
2357
2358int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
2359{
2360 int err;
2361 int ntail;
2362
2363 /* If the skbuff is non-linear, tailroom is always zero. */
2364 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
2365 memset(skb->data+skb->len, 0, pad);
2366 return 0;
2367 }
2368
2369 ntail = skb->data_len + pad - (skb->end - skb->tail);
2370 if (likely(skb_cloned(skb) || ntail > 0)) {
2371 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
2372 if (unlikely(err))
2373 goto free_skb;
2374 }
2375
2376 /* FIXME: The use of this function with non-linear skb's really needs
2377 * to be audited.
2378 */
2379 err = skb_linearize(skb);
2380 if (unlikely(err))
2381 goto free_skb;
2382
2383 memset(skb->data + skb->len, 0, pad);
2384 return 0;
2385
2386free_skb:
2387 if (free_on_error)
2388 kfree_skb(skb);
2389 return err;
2390}
2391EXPORT_SYMBOL(__skb_pad);
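
/* Usage sketch: how a driver typically pads a short Ethernet frame to the
 * 60-byte minimum. skb_put_padto() ends up in __skb_pad() with
 * @free_on_error set, so the skb has already been freed when an error is
 * returned.
 */
static inline int skb_pad_example(struct sk_buff *skb)
{
	int err = skb_put_padto(skb, ETH_ZLEN);

	if (err)
		return err;	/* skb has been freed */
	/* skb->len is now at least ETH_ZLEN and the added bytes are zeroed */
	return 0;
}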
2392
2393/**
2394 * pskb_put - add data to the tail of a potentially fragmented buffer
2395 * @skb: start of the buffer to use
2396 * @tail: tail fragment of the buffer to use
2397 * @len: amount of data to add
2398 *
2399 * This function extends the used data area of the potentially
2400 * fragmented buffer. @tail must be the last fragment of @skb -- or
2401 * @skb itself. If this would exceed the total buffer size the kernel
2402 * will panic. A pointer to the first byte of the extra data is
2403 * returned.
2404 */
2405
2406void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
2407{
2408 if (tail != skb) {
2409 skb->data_len += len;
2410 skb->len += len;
2411 }
2412 return skb_put(tail, len);
2413}
2414EXPORT_SYMBOL_GPL(pskb_put);
2415
2416/**
2417 * skb_put - add data to a buffer
2418 * @skb: buffer to use
2419 * @len: amount of data to add
2420 *
2421 * This function extends the used data area of the buffer. If this would
2422 * exceed the total buffer size the kernel will panic. A pointer to the
2423 * first byte of the extra data is returned.
2424 */
2425void *skb_put(struct sk_buff *skb, unsigned int len)
2426{
2427 void *tmp = skb_tail_pointer(skb);
2428 SKB_LINEAR_ASSERT(skb);
2429 skb->tail += len;
2430 skb->len += len;
2431 if (unlikely(skb->tail > skb->end))
2432 skb_over_panic(skb, len, __builtin_return_address(0));
2433 return tmp;
2434}
2435EXPORT_SYMBOL(skb_put);
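
/* Usage sketch: the classic allocate/reserve/put sequence for building an
 * outgoing packet; the payload buffer and header size are hypothetical.
 * skb_put_data() wraps the memcpy-into-skb_put() step shown here.
 */
static inline struct sk_buff *skb_put_example(const void *payload,
					      unsigned int len,
					      unsigned int hdr_len)
{
	struct sk_buff *skb = alloc_skb(hdr_len + len, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, hdr_len);		/* headroom for later skb_push() */
	memcpy(skb_put(skb, len), payload, len);/* tail and len grow by @len */
	return skb;
}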
2436
2437/**
2438 * skb_push - add data to the start of a buffer
2439 * @skb: buffer to use
2440 * @len: amount of data to add
2441 *
2442 * This function extends the used data area of the buffer at the buffer
2443 * start. If this would exceed the total buffer headroom the kernel will
2444 * panic. A pointer to the first byte of the extra data is returned.
2445 */
2446void *skb_push(struct sk_buff *skb, unsigned int len)
2447{
2448 skb->data -= len;
2449 skb->len += len;
2450 if (unlikely(skb->data < skb->head))
2451 skb_under_panic(skb, len, __builtin_return_address(0));
2452 return skb->data;
2453}
2454EXPORT_SYMBOL(skb_push);
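
/* Usage sketch: prepending a hypothetical 4-byte tag. Enough headroom must
 * already exist (see skb_reserve() or skb_cow_head()), otherwise skb_push()
 * panics as documented above.
 */
static inline void skb_push_example(struct sk_buff *skb, u32 tag)
{
	__be32 *p = skb_push(skb, sizeof(*p));	/* data moves back 4 bytes */

	*p = cpu_to_be32(tag);
}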
2455
2456/**
2457 * skb_pull - remove data from the start of a buffer
2458 * @skb: buffer to use
2459 * @len: amount of data to remove
2460 *
2461 * This function removes data from the start of a buffer, returning
2462 * the memory to the headroom. A pointer to the next data in the buffer
2463 * is returned. Once the data has been pulled future pushes will overwrite
2464 * the old data.
2465 */
2466void *skb_pull(struct sk_buff *skb, unsigned int len)
2467{
2468 return skb_pull_inline(skb, len);
2469}
2470EXPORT_SYMBOL(skb_pull);
2471
2472/**
2473 * skb_pull_data - remove data from the start of a buffer returning its
2474 * original position.
2475 * @skb: buffer to use
2476 * @len: amount of data to remove
2477 *
2478 * This function removes data from the start of a buffer, returning
2479 * the memory to the headroom. A pointer to the original data in the buffer
2480 * is returned after checking if there is enough data to pull. Once the
2481 * data has been pulled future pushes will overwrite the old data.
2482 */
2483void *skb_pull_data(struct sk_buff *skb, size_t len)
2484{
2485 void *data = skb->data;
2486
2487 if (skb->len < len)
2488 return NULL;
2489
2490 skb_pull(skb, len);
2491
2492 return data;
2493}
2494EXPORT_SYMBOL(skb_pull_data);
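
/* Usage sketch: consuming a hypothetical 4-byte tag from the front of the
 * packet while still being able to parse it via the returned pointer. This
 * assumes the tag lies in the linear area, e.g. after a successful
 * pskb_may_pull(skb, sizeof(__be32)).
 */
static inline int skb_pull_data_example(struct sk_buff *skb, u32 *tag)
{
	__be32 *p = skb_pull_data(skb, sizeof(*p));

	if (!p)
		return -EINVAL;		/* packet too short */
	*tag = be32_to_cpu(*p);
	return 0;
}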
2495
2496/**
2497 * skb_trim - remove end from a buffer
2498 * @skb: buffer to alter
2499 * @len: new length
2500 *
2501 * Cut the length of a buffer down by removing data from the tail. If
2502 * the buffer is already under the length specified it is not modified.
2503 * The skb must be linear.
2504 */
2505void skb_trim(struct sk_buff *skb, unsigned int len)
2506{
2507 if (skb->len > len)
2508 __skb_trim(skb, len);
2509}
2510EXPORT_SYMBOL(skb_trim);
2511
2512/* Trims skb to length len. It can change skb pointers.
2513 */
2514
2515int ___pskb_trim(struct sk_buff *skb, unsigned int len)
2516{
2517 struct sk_buff **fragp;
2518 struct sk_buff *frag;
2519 int offset = skb_headlen(skb);
2520 int nfrags = skb_shinfo(skb)->nr_frags;
2521 int i;
2522 int err;
2523
2524 if (skb_cloned(skb) &&
2525 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
2526 return err;
2527
2528 i = 0;
2529 if (offset >= len)
2530 goto drop_pages;
2531
2532 for (; i < nfrags; i++) {
2533 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2534
2535 if (end < len) {
2536 offset = end;
2537 continue;
2538 }
2539
2540 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
2541
2542drop_pages:
2543 skb_shinfo(skb)->nr_frags = i;
2544
2545 for (; i < nfrags; i++)
2546 skb_frag_unref(skb, i);
2547
2548 if (skb_has_frag_list(skb))
2549 skb_drop_fraglist(skb);
2550 goto done;
2551 }
2552
2553 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
2554 fragp = &frag->next) {
2555 int end = offset + frag->len;
2556
2557 if (skb_shared(frag)) {
2558 struct sk_buff *nfrag;
2559
2560 nfrag = skb_clone(frag, GFP_ATOMIC);
2561 if (unlikely(!nfrag))
2562 return -ENOMEM;
2563
2564 nfrag->next = frag->next;
2565 consume_skb(frag);
2566 frag = nfrag;
2567 *fragp = frag;
2568 }
2569
2570 if (end < len) {
2571 offset = end;
2572 continue;
2573 }
2574
2575 if (end > len &&
2576 unlikely((err = pskb_trim(frag, len - offset))))
2577 return err;
2578
2579 if (frag->next)
2580 skb_drop_list(&frag->next);
2581 break;
2582 }
2583
2584done:
2585 if (len > skb_headlen(skb)) {
2586 skb->data_len -= skb->len - len;
2587 skb->len = len;
2588 } else {
2589 skb->len = len;
2590 skb->data_len = 0;
2591 skb_set_tail_pointer(skb, len);
2592 }
2593
2594 if (!skb->sk || skb->destructor == sock_edemux)
2595 skb_condense(skb);
2596 return 0;
2597}
2598EXPORT_SYMBOL(___pskb_trim);
2599
2600/* Note : use pskb_trim_rcsum() instead of calling this directly
2601 */
2602int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2603{
2604 if (skb->ip_summed == CHECKSUM_COMPLETE) {
2605 int delta = skb->len - len;
2606
2607 skb->csum = csum_block_sub(skb->csum,
2608 skb_checksum(skb, len, delta, 0),
2609 len);
2610 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2611 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
2612 int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
2613
2614 if (offset + sizeof(__sum16) > hdlen)
2615 return -EINVAL;
2616 }
2617 return __pskb_trim(skb, len);
2618}
2619EXPORT_SYMBOL(pskb_trim_rcsum_slow);
2620
2621/**
2622 * __pskb_pull_tail - advance tail of skb header
2623 * @skb: buffer to reallocate
2624 * @delta: number of bytes to advance tail
2625 *
2626 * The function only makes sense on a fragmented &sk_buff:
2627 * it expands the header, moving its tail forward and copying the
2628 * necessary data from the fragmented part.
2629 *
2630 * The &sk_buff MUST have a reference count of 1.
2631 *
2632 * Returns %NULL (and the &sk_buff does not change) if the pull failed,
2633 * or the value of the new tail of the skb on success.
2634 *
2635 * All the pointers pointing into skb header may change and must be
2636 * reloaded after call to this function.
2637 */
2638
2639/* Moves tail of skb head forward, copying data from fragmented part,
2640 * when it is necessary.
2641 * 1. It may fail due to malloc failure.
2642 * 2. It may change skb pointers.
2643 *
2644 * It is pretty complicated. Luckily, it is called only in exceptional cases.
2645 */
2646void *__pskb_pull_tail(struct sk_buff *skb, int delta)
2647{
2648 /* If skb does not have enough free space at the tail, get a new one
2649 * plus 128 bytes for future expansions. If we have enough
2650 * room at the tail, reallocate without expansion only if skb is cloned.
2651 */
2652 int i, k, eat = (skb->tail + delta) - skb->end;
2653
2654 if (eat > 0 || skb_cloned(skb)) {
2655 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2656 GFP_ATOMIC))
2657 return NULL;
2658 }
2659
2660 BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2661 skb_tail_pointer(skb), delta));
2662
2663 /* Optimization: no fragments, no reason to pre-estimate
2664 * the size of pulled pages. Superb.
2665 */
2666 if (!skb_has_frag_list(skb))
2667 goto pull_pages;
2668
2669 /* Estimate size of pulled pages. */
2670 eat = delta;
2671 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2672 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2673
2674 if (size >= eat)
2675 goto pull_pages;
2676 eat -= size;
2677 }
2678
2679 /* If we need to update the frag list, we are in trouble.
2680 * Certainly, it is possible to add an offset to the skb data,
2681 * but taking into account that pulling is expected to
2682 * be a very rare operation, it is worth fighting against
2683 * further bloating of the skb head and crucifying ourselves here instead.
2684 * Pure masochism, indeed. 8)8)
2685 */
2686 if (eat) {
2687 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2688 struct sk_buff *clone = NULL;
2689 struct sk_buff *insp = NULL;
2690
2691 do {
2692 if (list->len <= eat) {
2693 /* Eaten as whole. */
2694 eat -= list->len;
2695 list = list->next;
2696 insp = list;
2697 } else {
2698 /* Eaten partially. */
2699 if (skb_is_gso(skb) && !list->head_frag &&
2700 skb_headlen(list))
2701 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2702
2703 if (skb_shared(list)) {
2704 /* Sucks! We need to fork list. :-( */
2705 clone = skb_clone(list, GFP_ATOMIC);
2706 if (!clone)
2707 return NULL;
2708 insp = list->next;
2709 list = clone;
2710 } else {
2711 /* This may be pulled without
2712 * problems. */
2713 insp = list;
2714 }
2715 if (!pskb_pull(list, eat)) {
2716 kfree_skb(clone);
2717 return NULL;
2718 }
2719 break;
2720 }
2721 } while (eat);
2722
2723 /* Free pulled out fragments. */
2724 while ((list = skb_shinfo(skb)->frag_list) != insp) {
2725 skb_shinfo(skb)->frag_list = list->next;
2726 consume_skb(list);
2727 }
2728 /* And insert new clone at head. */
2729 if (clone) {
2730 clone->next = list;
2731 skb_shinfo(skb)->frag_list = clone;
2732 }
2733 }
2734 /* Success! Now we may commit changes to skb data. */
2735
2736pull_pages:
2737 eat = delta;
2738 k = 0;
2739 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2740 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2741
2742 if (size <= eat) {
2743 skb_frag_unref(skb, i);
2744 eat -= size;
2745 } else {
2746 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2747
2748 *frag = skb_shinfo(skb)->frags[i];
2749 if (eat) {
2750 skb_frag_off_add(frag, eat);
2751 skb_frag_size_sub(frag, eat);
2752 if (!i)
2753 goto end;
2754 eat = 0;
2755 }
2756 k++;
2757 }
2758 }
2759 skb_shinfo(skb)->nr_frags = k;
2760
2761end:
2762 skb->tail += delta;
2763 skb->data_len -= delta;
2764
2765 if (!skb->data_len)
2766 skb_zcopy_clear(skb, false);
2767
2768 return skb_tail_pointer(skb);
2769}
2770EXPORT_SYMBOL(__pskb_pull_tail);
2771
2772/**
2773 * skb_copy_bits - copy bits from skb to kernel buffer
2774 * @skb: source skb
2775 * @offset: offset in source
2776 * @to: destination buffer
2777 * @len: number of bytes to copy
2778 *
2779 * Copy the specified number of bytes from the source skb to the
2780 * destination buffer.
2781 *
2782 * CAUTION ! :
2783 * If its prototype is ever changed,
2784 * check arch/{*}/net/{*}.S files,
2785 * since it is called from BPF assembly code.
2786 */
2787int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2788{
2789 int start = skb_headlen(skb);
2790 struct sk_buff *frag_iter;
2791 int i, copy;
2792
2793 if (offset > (int)skb->len - len)
2794 goto fault;
2795
2796 /* Copy header. */
2797 if ((copy = start - offset) > 0) {
2798 if (copy > len)
2799 copy = len;
2800 skb_copy_from_linear_data_offset(skb, offset, to, copy);
2801 if ((len -= copy) == 0)
2802 return 0;
2803 offset += copy;
2804 to += copy;
2805 }
2806
2807 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2808 int end;
2809 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2810
2811 WARN_ON(start > offset + len);
2812
2813 end = start + skb_frag_size(f);
2814 if ((copy = end - offset) > 0) {
2815 u32 p_off, p_len, copied;
2816 struct page *p;
2817 u8 *vaddr;
2818
2819 if (copy > len)
2820 copy = len;
2821
2822 skb_frag_foreach_page(f,
2823 skb_frag_off(f) + offset - start,
2824 copy, p, p_off, p_len, copied) {
2825 vaddr = kmap_atomic(p);
2826 memcpy(to + copied, vaddr + p_off, p_len);
2827 kunmap_atomic(vaddr);
2828 }
2829
2830 if ((len -= copy) == 0)
2831 return 0;
2832 offset += copy;
2833 to += copy;
2834 }
2835 start = end;
2836 }
2837
2838 skb_walk_frags(skb, frag_iter) {
2839 int end;
2840
2841 WARN_ON(start > offset + len);
2842
2843 end = start + frag_iter->len;
2844 if ((copy = end - offset) > 0) {
2845 if (copy > len)
2846 copy = len;
2847 if (skb_copy_bits(frag_iter, offset - start, to, copy))
2848 goto fault;
2849 if ((len -= copy) == 0)
2850 return 0;
2851 offset += copy;
2852 to += copy;
2853 }
2854 start = end;
2855 }
2856
2857 if (!len)
2858 return 0;
2859
2860fault:
2861 return -EFAULT;
2862}
2863EXPORT_SYMBOL(skb_copy_bits);
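
/* Usage sketch: reading a transport header that may be split across frags
 * into a stack buffer; unlike dereferencing skb->data directly, this also
 * works for non-linear skbs. The UDP header is just an example.
 */
static inline int skb_copy_bits_example(const struct sk_buff *skb)
{
	struct udphdr uh;

	if (skb_copy_bits(skb, skb_transport_offset(skb), &uh, sizeof(uh)))
		return -EINVAL;		/* offset/length out of bounds */
	return ntohs(uh.len);		/* header is now safe to parse */
}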
2864
2865/*
2866 * Callback from splice_to_pipe(), if we need to release some pages
2867 * at the end of the spd in case we errored out while filling the pipe.
2868 */
2869static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2870{
2871 put_page(spd->pages[i]);
2872}
2873
2874static struct page *linear_to_page(struct page *page, unsigned int *len,
2875 unsigned int *offset,
2876 struct sock *sk)
2877{
2878 struct page_frag *pfrag = sk_page_frag(sk);
2879
2880 if (!sk_page_frag_refill(sk, pfrag))
2881 return NULL;
2882
2883 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2884
2885 memcpy(page_address(pfrag->page) + pfrag->offset,
2886 page_address(page) + *offset, *len);
2887 *offset = pfrag->offset;
2888 pfrag->offset += *len;
2889
2890 return pfrag->page;
2891}
2892
2893static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2894 struct page *page,
2895 unsigned int offset)
2896{
2897 return spd->nr_pages &&
2898 spd->pages[spd->nr_pages - 1] == page &&
2899 (spd->partial[spd->nr_pages - 1].offset +
2900 spd->partial[spd->nr_pages - 1].len == offset);
2901}
2902
2903/*
2904 * Fill page/offset/length into spd, if it can hold more pages.
2905 */
2906static bool spd_fill_page(struct splice_pipe_desc *spd,
2907 struct pipe_inode_info *pipe, struct page *page,
2908 unsigned int *len, unsigned int offset,
2909 bool linear,
2910 struct sock *sk)
2911{
2912 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2913 return true;
2914
2915 if (linear) {
2916 page = linear_to_page(page, len, &offset, sk);
2917 if (!page)
2918 return true;
2919 }
2920 if (spd_can_coalesce(spd, page, offset)) {
2921 spd->partial[spd->nr_pages - 1].len += *len;
2922 return false;
2923 }
2924 get_page(page);
2925 spd->pages[spd->nr_pages] = page;
2926 spd->partial[spd->nr_pages].len = *len;
2927 spd->partial[spd->nr_pages].offset = offset;
2928 spd->nr_pages++;
2929
2930 return false;
2931}
2932
2933static bool __splice_segment(struct page *page, unsigned int poff,
2934 unsigned int plen, unsigned int *off,
2935 unsigned int *len,
2936 struct splice_pipe_desc *spd, bool linear,
2937 struct sock *sk,
2938 struct pipe_inode_info *pipe)
2939{
2940 if (!*len)
2941 return true;
2942
2943 /* skip this segment if already processed */
2944 if (*off >= plen) {
2945 *off -= plen;
2946 return false;
2947 }
2948
2949 /* ignore any bits we already processed */
2950 poff += *off;
2951 plen -= *off;
2952 *off = 0;
2953
2954 do {
2955 unsigned int flen = min(*len, plen);
2956
2957 if (spd_fill_page(spd, pipe, page, &flen, poff,
2958 linear, sk))
2959 return true;
2960 poff += flen;
2961 plen -= flen;
2962 *len -= flen;
2963 } while (*len && plen);
2964
2965 return false;
2966}
2967
2968/*
2969 * Map linear and fragment data from the skb to spd. It reports true if the
2970 * pipe is full or if we already spliced the requested length.
2971 */
2972static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2973 unsigned int *offset, unsigned int *len,
2974 struct splice_pipe_desc *spd, struct sock *sk)
2975{
2976 int seg;
2977 struct sk_buff *iter;
2978
2979 /* map the linear part :
2980 * If skb->head_frag is set, this 'linear' part is backed by a
2981 * fragment, and if the head is not shared with any clones then
2982 * we can avoid a copy since we own the head portion of this page.
2983 */
2984 if (__splice_segment(virt_to_page(skb->data),
2985 (unsigned long) skb->data & (PAGE_SIZE - 1),
2986 skb_headlen(skb),
2987 offset, len, spd,
2988 skb_head_is_locked(skb),
2989 sk, pipe))
2990 return true;
2991
2992 /*
2993 * then map the fragments
2994 */
2995 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2996 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2997
2998 if (__splice_segment(skb_frag_page(f),
2999 skb_frag_off(f), skb_frag_size(f),
3000 offset, len, spd, false, sk, pipe))
3001 return true;
3002 }
3003
3004 skb_walk_frags(skb, iter) {
3005 if (*offset >= iter->len) {
3006 *offset -= iter->len;
3007 continue;
3008 }
3009 /* __skb_splice_bits() only fails if the output has no room
3010 * left, so no point in going over the frag_list for the error
3011 * case.
3012 */
3013 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
3014 return true;
3015 }
3016
3017 return false;
3018}
3019
3020/*
3021 * Map data from the skb to a pipe. Should handle both the linear part,
3022 * the fragments, and the frag list.
3023 */
3024int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
3025 struct pipe_inode_info *pipe, unsigned int tlen,
3026 unsigned int flags)
3027{
3028 struct partial_page partial[MAX_SKB_FRAGS];
3029 struct page *pages[MAX_SKB_FRAGS];
3030 struct splice_pipe_desc spd = {
3031 .pages = pages,
3032 .partial = partial,
3033 .nr_pages_max = MAX_SKB_FRAGS,
3034 .ops = &nosteal_pipe_buf_ops,
3035 .spd_release = sock_spd_release,
3036 };
3037 int ret = 0;
3038
3039 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
3040
3041 if (spd.nr_pages)
3042 ret = splice_to_pipe(pipe, &spd);
3043
3044 return ret;
3045}
3046EXPORT_SYMBOL_GPL(skb_splice_bits);
3047
3048static int sendmsg_locked(struct sock *sk, struct msghdr *msg)
3049{
3050 struct socket *sock = sk->sk_socket;
3051 size_t size = msg_data_left(msg);
3052
3053 if (!sock)
3054 return -EINVAL;
3055
3056 if (!sock->ops->sendmsg_locked)
3057 return sock_no_sendmsg_locked(sk, msg, size);
3058
3059 return sock->ops->sendmsg_locked(sk, msg, size);
3060}
3061
3062static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg)
3063{
3064 struct socket *sock = sk->sk_socket;
3065
3066 if (!sock)
3067 return -EINVAL;
3068 return sock_sendmsg(sock, msg);
3069}
3070
3071typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg);
3072static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
3073 int len, sendmsg_func sendmsg)
3074{
3075 unsigned int orig_len = len;
3076 struct sk_buff *head = skb;
3077 unsigned short fragidx;
3078 int slen, ret;
3079
3080do_frag_list:
3081
3082 /* Deal with head data */
3083 while (offset < skb_headlen(skb) && len) {
3084 struct kvec kv;
3085 struct msghdr msg;
3086
3087 slen = min_t(int, len, skb_headlen(skb) - offset);
3088 kv.iov_base = skb->data + offset;
3089 kv.iov_len = slen;
3090 memset(&msg, 0, sizeof(msg));
3091 msg.msg_flags = MSG_DONTWAIT;
3092
3093 iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &kv, 1, slen);
3094 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
3095 sendmsg_unlocked, sk, &msg);
3096 if (ret <= 0)
3097 goto error;
3098
3099 offset += ret;
3100 len -= ret;
3101 }
3102
3103 /* All the data was skb head? */
3104 if (!len)
3105 goto out;
3106
3107 /* Make offset relative to start of frags */
3108 offset -= skb_headlen(skb);
3109
3110 /* Find where we are in frag list */
3111 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
3112 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
3113
3114 if (offset < skb_frag_size(frag))
3115 break;
3116
3117 offset -= skb_frag_size(frag);
3118 }
3119
3120 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
3121 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
3122
3123 slen = min_t(size_t, len, skb_frag_size(frag) - offset);
3124
3125 while (slen) {
3126 struct bio_vec bvec;
3127 struct msghdr msg = {
3128 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT,
3129 };
3130
3131 bvec_set_page(&bvec, skb_frag_page(frag), slen,
3132 skb_frag_off(frag) + offset);
3133 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1,
3134 slen);
3135
3136 ret = INDIRECT_CALL_2(sendmsg, sendmsg_locked,
3137 sendmsg_unlocked, sk, &msg);
3138 if (ret <= 0)
3139 goto error;
3140
3141 len -= ret;
3142 offset += ret;
3143 slen -= ret;
3144 }
3145
3146 offset = 0;
3147 }
3148
3149 if (len) {
3150 /* Process any frag lists */
3151
3152 if (skb == head) {
3153 if (skb_has_frag_list(skb)) {
3154 skb = skb_shinfo(skb)->frag_list;
3155 goto do_frag_list;
3156 }
3157 } else if (skb->next) {
3158 skb = skb->next;
3159 goto do_frag_list;
3160 }
3161 }
3162
3163out:
3164 return orig_len - len;
3165
3166error:
3167 return orig_len == len ? ret : orig_len - len;
3168}
3169
3170/* Send skb data on a socket. Socket must be locked. */
3171int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
3172 int len)
3173{
3174 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked);
3175}
3176EXPORT_SYMBOL_GPL(skb_send_sock_locked);
3177
3178/* Send skb data on a socket. Socket must be unlocked. */
3179int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
3180{
3181 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked);
3182}
3183
3184/**
3185 * skb_store_bits - store bits from kernel buffer to skb
3186 * @skb: destination buffer
3187 * @offset: offset in destination
3188 * @from: source buffer
3189 * @len: number of bytes to copy
3190 *
3191 * Copy the specified number of bytes from the source buffer to the
3192 * destination skb. This function handles all the messy bits of
3193 * traversing fragment lists and such.
3194 */
3195
3196int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
3197{
3198 int start = skb_headlen(skb);
3199 struct sk_buff *frag_iter;
3200 int i, copy;
3201
3202 if (offset > (int)skb->len - len)
3203 goto fault;
3204
3205 if ((copy = start - offset) > 0) {
3206 if (copy > len)
3207 copy = len;
3208 skb_copy_to_linear_data_offset(skb, offset, from, copy);
3209 if ((len -= copy) == 0)
3210 return 0;
3211 offset += copy;
3212 from += copy;
3213 }
3214
3215 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3216 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3217 int end;
3218
3219 WARN_ON(start > offset + len);
3220
3221 end = start + skb_frag_size(frag);
3222 if ((copy = end - offset) > 0) {
3223 u32 p_off, p_len, copied;
3224 struct page *p;
3225 u8 *vaddr;
3226
3227 if (copy > len)
3228 copy = len;
3229
3230 skb_frag_foreach_page(frag,
3231 skb_frag_off(frag) + offset - start,
3232 copy, p, p_off, p_len, copied) {
3233 vaddr = kmap_atomic(p);
3234 memcpy(vaddr + p_off, from + copied, p_len);
3235 kunmap_atomic(vaddr);
3236 }
3237
3238 if ((len -= copy) == 0)
3239 return 0;
3240 offset += copy;
3241 from += copy;
3242 }
3243 start = end;
3244 }
3245
3246 skb_walk_frags(skb, frag_iter) {
3247 int end;
3248
3249 WARN_ON(start > offset + len);
3250
3251 end = start + frag_iter->len;
3252 if ((copy = end - offset) > 0) {
3253 if (copy > len)
3254 copy = len;
3255 if (skb_store_bits(frag_iter, offset - start,
3256 from, copy))
3257 goto fault;
3258 if ((len -= copy) == 0)
3259 return 0;
3260 offset += copy;
3261 from += copy;
3262 }
3263 start = end;
3264 }
3265 if (!len)
3266 return 0;
3267
3268fault:
3269 return -EFAULT;
3270}
3271EXPORT_SYMBOL(skb_store_bits);
3272
3273/* Checksum skb data. */
3274__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
3275 __wsum csum, const struct skb_checksum_ops *ops)
3276{
3277 int start = skb_headlen(skb);
3278 int i, copy = start - offset;
3279 struct sk_buff *frag_iter;
3280 int pos = 0;
3281
3282 /* Checksum header. */
3283 if (copy > 0) {
3284 if (copy > len)
3285 copy = len;
3286 csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
3287 skb->data + offset, copy, csum);
3288 if ((len -= copy) == 0)
3289 return csum;
3290 offset += copy;
3291 pos = copy;
3292 }
3293
3294 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3295 int end;
3296 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3297
3298 WARN_ON(start > offset + len);
3299
3300 end = start + skb_frag_size(frag);
3301 if ((copy = end - offset) > 0) {
3302 u32 p_off, p_len, copied;
3303 struct page *p;
3304 __wsum csum2;
3305 u8 *vaddr;
3306
3307 if (copy > len)
3308 copy = len;
3309
3310 skb_frag_foreach_page(frag,
3311 skb_frag_off(frag) + offset - start,
3312 copy, p, p_off, p_len, copied) {
3313 vaddr = kmap_atomic(p);
3314 csum2 = INDIRECT_CALL_1(ops->update,
3315 csum_partial_ext,
3316 vaddr + p_off, p_len, 0);
3317 kunmap_atomic(vaddr);
3318 csum = INDIRECT_CALL_1(ops->combine,
3319 csum_block_add_ext, csum,
3320 csum2, pos, p_len);
3321 pos += p_len;
3322 }
3323
3324 if (!(len -= copy))
3325 return csum;
3326 offset += copy;
3327 }
3328 start = end;
3329 }
3330
3331 skb_walk_frags(skb, frag_iter) {
3332 int end;
3333
3334 WARN_ON(start > offset + len);
3335
3336 end = start + frag_iter->len;
3337 if ((copy = end - offset) > 0) {
3338 __wsum csum2;
3339 if (copy > len)
3340 copy = len;
3341 csum2 = __skb_checksum(frag_iter, offset - start,
3342 copy, 0, ops);
3343 csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
3344 csum, csum2, pos, copy);
3345 if ((len -= copy) == 0)
3346 return csum;
3347 offset += copy;
3348 pos += copy;
3349 }
3350 start = end;
3351 }
3352 BUG_ON(len);
3353
3354 return csum;
3355}
3356EXPORT_SYMBOL(__skb_checksum);
3357
3358__wsum skb_checksum(const struct sk_buff *skb, int offset,
3359 int len, __wsum csum)
3360{
3361 const struct skb_checksum_ops ops = {
3362 .update = csum_partial_ext,
3363 .combine = csum_block_add_ext,
3364 };
3365
3366 return __skb_checksum(skb, offset, len, csum, &ops);
3367}
3368EXPORT_SYMBOL(skb_checksum);
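
/* Usage sketch: folding a checksum over the transport payload, the way a
 * protocol does when it must verify a packet in software.
 * @pseudo_hdr_csum would come from e.g. csum_tcpudp_nofold().
 */
static inline __sum16 skb_checksum_example(const struct sk_buff *skb,
					   __wsum pseudo_hdr_csum)
{
	int off = skb_transport_offset(skb);

	return csum_fold(skb_checksum(skb, off, skb->len - off,
				      pseudo_hdr_csum));
}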
3369
3370/* Both of above in one bottle. */
3371
3372__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
3373 u8 *to, int len)
3374{
3375 int start = skb_headlen(skb);
3376 int i, copy = start - offset;
3377 struct sk_buff *frag_iter;
3378 int pos = 0;
3379 __wsum csum = 0;
3380
3381 /* Copy header. */
3382 if (copy > 0) {
3383 if (copy > len)
3384 copy = len;
3385 csum = csum_partial_copy_nocheck(skb->data + offset, to,
3386 copy);
3387 if ((len -= copy) == 0)
3388 return csum;
3389 offset += copy;
3390 to += copy;
3391 pos = copy;
3392 }
3393
3394 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3395 int end;
3396
3397 WARN_ON(start > offset + len);
3398
3399 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3400 if ((copy = end - offset) > 0) {
3401 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3402 u32 p_off, p_len, copied;
3403 struct page *p;
3404 __wsum csum2;
3405 u8 *vaddr;
3406
3407 if (copy > len)
3408 copy = len;
3409
3410 skb_frag_foreach_page(frag,
3411 skb_frag_off(frag) + offset - start,
3412 copy, p, p_off, p_len, copied) {
3413 vaddr = kmap_atomic(p);
3414 csum2 = csum_partial_copy_nocheck(vaddr + p_off,
3415 to + copied,
3416 p_len);
3417 kunmap_atomic(vaddr);
3418 csum = csum_block_add(csum, csum2, pos);
3419 pos += p_len;
3420 }
3421
3422 if (!(len -= copy))
3423 return csum;
3424 offset += copy;
3425 to += copy;
3426 }
3427 start = end;
3428 }
3429
3430 skb_walk_frags(skb, frag_iter) {
3431 __wsum csum2;
3432 int end;
3433
3434 WARN_ON(start > offset + len);
3435
3436 end = start + frag_iter->len;
3437 if ((copy = end - offset) > 0) {
3438 if (copy > len)
3439 copy = len;
3440 csum2 = skb_copy_and_csum_bits(frag_iter,
3441 offset - start,
3442 to, copy);
3443 csum = csum_block_add(csum, csum2, pos);
3444 if ((len -= copy) == 0)
3445 return csum;
3446 offset += copy;
3447 to += copy;
3448 pos += copy;
3449 }
3450 start = end;
3451 }
3452 BUG_ON(len);
3453 return csum;
3454}
3455EXPORT_SYMBOL(skb_copy_and_csum_bits);
3456
3457__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
3458{
3459 __sum16 sum;
3460
3461 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
3462 /* See comments in __skb_checksum_complete(). */
3463 if (likely(!sum)) {
3464 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3465 !skb->csum_complete_sw)
3466 netdev_rx_csum_fault(skb->dev, skb);
3467 }
3468 if (!skb_shared(skb))
3469 skb->csum_valid = !sum;
3470 return sum;
3471}
3472EXPORT_SYMBOL(__skb_checksum_complete_head);
3473
3474/* This function assumes skb->csum already holds pseudo header's checksum,
3475 * which has been changed from the hardware checksum, for example, by
3476 * __skb_checksum_validate_complete(). And, the original skb->csum must
3477 * have been validated unsuccessfully for the CHECKSUM_COMPLETE case.
3478 *
3479 * It returns non-zero if the recomputed checksum is still invalid, otherwise
3480 * zero. The new checksum is stored back into skb->csum unless the skb is
3481 * shared.
3482 */
3483__sum16 __skb_checksum_complete(struct sk_buff *skb)
3484{
3485 __wsum csum;
3486 __sum16 sum;
3487
3488 csum = skb_checksum(skb, 0, skb->len, 0);
3489
3490 sum = csum_fold(csum_add(skb->csum, csum));
3491 /* This check is inverted, because we already knew the hardware
3492 * checksum is invalid before calling this function. So, if the
3493 * re-computed checksum is valid instead, then we have a mismatch
3494 * between the original skb->csum and skb_checksum(). This means either
3495 * the original hardware checksum is incorrect or we screw up skb->csum
3496 * when moving skb->data around.
3497 */
3498 if (likely(!sum)) {
3499 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
3500 !skb->csum_complete_sw)
3501 netdev_rx_csum_fault(skb->dev, skb);
3502 }
3503
3504 if (!skb_shared(skb)) {
3505 /* Save full packet checksum */
3506 skb->csum = csum;
3507 skb->ip_summed = CHECKSUM_COMPLETE;
3508 skb->csum_complete_sw = 1;
3509 skb->csum_valid = !sum;
3510 }
3511
3512 return sum;
3513}
3514EXPORT_SYMBOL(__skb_checksum_complete);
3515
3516static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
3517{
3518 net_warn_ratelimited(
3519 "%s: attempt to compute crc32c without libcrc32c.ko\n",
3520 __func__);
3521 return 0;
3522}
3523
3524static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
3525 int offset, int len)
3526{
3527 net_warn_ratelimited(
3528 "%s: attempt to compute crc32c without libcrc32c.ko\n",
3529 __func__);
3530 return 0;
3531}
3532
3533static const struct skb_checksum_ops default_crc32c_ops = {
3534 .update = warn_crc32c_csum_update,
3535 .combine = warn_crc32c_csum_combine,
3536};
3537
3538const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
3539 &default_crc32c_ops;
3540EXPORT_SYMBOL(crc32c_csum_stub);
3541
3542/**
3543 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
3544 * @from: source buffer
3545 *
3546 * Calculates the amount of linear headroom needed in the 'to' skb passed
3547 * into skb_zerocopy().
3548 */
3549unsigned int
3550skb_zerocopy_headlen(const struct sk_buff *from)
3551{
3552 unsigned int hlen = 0;
3553
3554 if (!from->head_frag ||
3555 skb_headlen(from) < L1_CACHE_BYTES ||
3556 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
3557 hlen = skb_headlen(from);
3558 if (!hlen)
3559 hlen = from->len;
3560 }
3561
3562 if (skb_has_frag_list(from))
3563 hlen = from->len;
3564
3565 return hlen;
3566}
3567EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
3568
3569/**
3570 * skb_zerocopy - Zero copy skb to skb
3571 * @to: destination buffer
3572 * @from: source buffer
3573 * @len: number of bytes to copy from source buffer
3574 * @hlen: size of linear headroom in destination buffer
3575 *
3576 * Copies up to `len` bytes from `from` to `to` by creating references
3577 * to the frags in the source buffer.
3578 *
3579 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
3580 * headroom in the `to` buffer.
3581 *
3582 * Return value:
3583 * 0: everything is OK
3584 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
3585 * -EFAULT: skb_copy_bits() found some problem with skb geometry
3586 */
3587int
3588skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
3589{
3590 int i, j = 0;
3591 int plen = 0; /* length of skb->head fragment */
3592 int ret;
3593 struct page *page;
3594 unsigned int offset;
3595
3596 BUG_ON(!from->head_frag && !hlen);
3597
3598	/* don't bother with small payloads */
3599 if (len <= skb_tailroom(to))
3600 return skb_copy_bits(from, 0, skb_put(to, len), len);
3601
3602 if (hlen) {
3603 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
3604 if (unlikely(ret))
3605 return ret;
3606 len -= hlen;
3607 } else {
3608 plen = min_t(int, skb_headlen(from), len);
3609 if (plen) {
3610 page = virt_to_head_page(from->head);
3611 offset = from->data - (unsigned char *)page_address(page);
3612 __skb_fill_page_desc(to, 0, page, offset, plen);
3613 get_page(page);
3614 j = 1;
3615 len -= plen;
3616 }
3617 }
3618
3619 skb_len_add(to, len + plen);
3620
3621 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
3622 skb_tx_error(from);
3623 return -ENOMEM;
3624 }
3625 skb_zerocopy_clone(to, from, GFP_ATOMIC);
3626
3627 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
3628 int size;
3629
3630 if (!len)
3631 break;
3632 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
3633 size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
3634 len);
3635 skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
3636 len -= size;
3637 skb_frag_ref(to, j);
3638 j++;
3639 }
3640 skb_shinfo(to)->nr_frags = j;
3641
3642 return 0;
3643}
3644EXPORT_SYMBOL_GPL(skb_zerocopy);
3645
3646void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
3647{
3648 __wsum csum;
3649 long csstart;
3650
3651 if (skb->ip_summed == CHECKSUM_PARTIAL)
3652 csstart = skb_checksum_start_offset(skb);
3653 else
3654 csstart = skb_headlen(skb);
3655
3656 BUG_ON(csstart > skb_headlen(skb));
3657
3658 skb_copy_from_linear_data(skb, to, csstart);
3659
3660 csum = 0;
3661 if (csstart != skb->len)
3662 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
3663 skb->len - csstart);
3664
3665 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3666 long csstuff = csstart + skb->csum_offset;
3667
3668 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
3669 }
3670}
3671EXPORT_SYMBOL(skb_copy_and_csum_dev);
3672
3673/**
3674 * skb_dequeue - remove from the head of the queue
3675 * @list: list to dequeue from
3676 *
3677 * Remove the head of the list. The list lock is taken so the function
3678 * may be used safely with other locking list functions. The head item is
3679 * returned or %NULL if the list is empty.
3680 */
3681
3682struct sk_buff *skb_dequeue(struct sk_buff_head *list)
3683{
3684 unsigned long flags;
3685 struct sk_buff *result;
3686
3687 spin_lock_irqsave(&list->lock, flags);
3688 result = __skb_dequeue(list);
3689 spin_unlock_irqrestore(&list->lock, flags);
3690 return result;
3691}
3692EXPORT_SYMBOL(skb_dequeue);
3693
3694/**
3695 * skb_dequeue_tail - remove from the tail of the queue
3696 * @list: list to dequeue from
3697 *
3698 * Remove the tail of the list. The list lock is taken so the function
3699 * may be used safely with other locking list functions. The tail item is
3700 * returned or %NULL if the list is empty.
3701 */
3702struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
3703{
3704 unsigned long flags;
3705 struct sk_buff *result;
3706
3707 spin_lock_irqsave(&list->lock, flags);
3708 result = __skb_dequeue_tail(list);
3709 spin_unlock_irqrestore(&list->lock, flags);
3710 return result;
3711}
3712EXPORT_SYMBOL(skb_dequeue_tail);
3713
3714/**
3715 * skb_queue_purge_reason - empty a list
3716 * @list: list to empty
3717 * @reason: drop reason
3718 *
3719 * Delete all buffers on an &sk_buff list. Each buffer is removed from
3720 * the list and one reference dropped. This function takes the list
3721 * lock and is atomic with respect to other list locking functions.
3722 */
3723void skb_queue_purge_reason(struct sk_buff_head *list,
3724 enum skb_drop_reason reason)
3725{
3726 struct sk_buff_head tmp;
3727 unsigned long flags;
3728
3729 if (skb_queue_empty_lockless(list))
3730 return;
3731
3732 __skb_queue_head_init(&tmp);
3733
3734 spin_lock_irqsave(&list->lock, flags);
3735 skb_queue_splice_init(list, &tmp);
3736 spin_unlock_irqrestore(&list->lock, flags);
3737
3738 __skb_queue_purge_reason(&tmp, reason);
3739}
3740EXPORT_SYMBOL(skb_queue_purge_reason);
3741
3742/**
3743 * skb_rbtree_purge - empty a skb rbtree
3744 * @root: root of the rbtree to empty
3745 * Return value: the sum of truesizes of all purged skbs.
3746 *
3747 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3748 * the list and one reference dropped. This function does not take
3749 * any lock. Synchronization should be handled by the caller (e.g., TCP
3750 * out-of-order queue is protected by the socket lock).
3751 */
3752unsigned int skb_rbtree_purge(struct rb_root *root)
3753{
3754 struct rb_node *p = rb_first(root);
3755 unsigned int sum = 0;
3756
3757 while (p) {
3758 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3759
3760 p = rb_next(p);
3761 rb_erase(&skb->rbnode, root);
3762 sum += skb->truesize;
3763 kfree_skb(skb);
3764 }
3765 return sum;
3766}
3767
3768void skb_errqueue_purge(struct sk_buff_head *list)
3769{
3770 struct sk_buff *skb, *next;
3771 struct sk_buff_head kill;
3772 unsigned long flags;
3773
3774 __skb_queue_head_init(&kill);
3775
3776 spin_lock_irqsave(&list->lock, flags);
3777 skb_queue_walk_safe(list, skb, next) {
3778 if (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ZEROCOPY ||
3779 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING)
3780 continue;
3781 __skb_unlink(skb, list);
3782 __skb_queue_tail(&kill, skb);
3783 }
3784 spin_unlock_irqrestore(&list->lock, flags);
3785 __skb_queue_purge(&kill);
3786}
3787EXPORT_SYMBOL(skb_errqueue_purge);
3788
3789/**
3790 * skb_queue_head - queue a buffer at the list head
3791 * @list: list to use
3792 * @newsk: buffer to queue
3793 *
3794 * Queue a buffer at the start of the list. This function takes the
3795 * list lock and can be used safely with other locking &sk_buff
3796 * functions.
3797 *
3798 * A buffer cannot be placed on two lists at the same time.
3799 */
3800void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
3801{
3802 unsigned long flags;
3803
3804 spin_lock_irqsave(&list->lock, flags);
3805 __skb_queue_head(list, newsk);
3806 spin_unlock_irqrestore(&list->lock, flags);
3807}
3808EXPORT_SYMBOL(skb_queue_head);
3809
3810/**
3811 * skb_queue_tail - queue a buffer at the list tail
3812 * @list: list to use
3813 * @newsk: buffer to queue
3814 *
3815 * Queue a buffer at the tail of the list. This function takes the
3816 * list lock and can be used safely with other locking &sk_buff
3817 * functions.
3818 *
3819 * A buffer cannot be placed on two lists at the same time.
3820 */
3821void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
3822{
3823 unsigned long flags;
3824
3825 spin_lock_irqsave(&list->lock, flags);
3826 __skb_queue_tail(list, newsk);
3827 spin_unlock_irqrestore(&list->lock, flags);
3828}
3829EXPORT_SYMBOL(skb_queue_tail);
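
/* Usage sketch: the producer/consumer pattern these locked helpers are meant
 * for, with a hypothetical queue filled from one context (e.g. an interrupt
 * handler) and drained from another via the supplied process() callback.
 */
static inline void skb_queue_example(struct sk_buff_head *queue,
				     struct sk_buff *skb,
				     void (*process)(struct sk_buff *))
{
	skb_queue_tail(queue, skb);			/* producer side */

	while ((skb = skb_dequeue(queue)) != NULL)	/* consumer side */
		process(skb);
}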
3830
3831/**
3832 * skb_unlink - remove a buffer from a list
3833 * @skb: buffer to remove
3834 * @list: list to use
3835 *
3836 * Remove a packet from a list. The list locks are taken and this
3837 * function is atomic with respect to other list locked calls.
3838 *
3839 * You must know what list the SKB is on.
3840 */
3841void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
3842{
3843 unsigned long flags;
3844
3845 spin_lock_irqsave(&list->lock, flags);
3846 __skb_unlink(skb, list);
3847 spin_unlock_irqrestore(&list->lock, flags);
3848}
3849EXPORT_SYMBOL(skb_unlink);
3850
3851/**
3852 * skb_append - append a buffer
3853 * @old: buffer to insert after
3854 * @newsk: buffer to insert
3855 * @list: list to use
3856 *
3857 * Place a packet after a given packet in a list. The list locks are taken
3858 * and this function is atomic with respect to other list locked calls.
3859 * A buffer cannot be placed on two lists at the same time.
3860 */
3861void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
3862{
3863 unsigned long flags;
3864
3865 spin_lock_irqsave(&list->lock, flags);
3866 __skb_queue_after(list, old, newsk);
3867 spin_unlock_irqrestore(&list->lock, flags);
3868}
3869EXPORT_SYMBOL(skb_append);
3870
3871static inline void skb_split_inside_header(struct sk_buff *skb,
3872 struct sk_buff* skb1,
3873 const u32 len, const int pos)
3874{
3875 int i;
3876
3877 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3878 pos - len);
3879 /* And move data appendix as is. */
3880 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
3881 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
3882
3883 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
3884 skb_shinfo(skb)->nr_frags = 0;
3885 skb1->data_len = skb->data_len;
3886 skb1->len += skb1->data_len;
3887 skb->data_len = 0;
3888 skb->len = len;
3889 skb_set_tail_pointer(skb, len);
3890}
3891
3892static inline void skb_split_no_header(struct sk_buff *skb,
3893 struct sk_buff* skb1,
3894 const u32 len, int pos)
3895{
3896 int i, k = 0;
3897 const int nfrags = skb_shinfo(skb)->nr_frags;
3898
3899 skb_shinfo(skb)->nr_frags = 0;
3900 skb1->len = skb1->data_len = skb->len - len;
3901 skb->len = len;
3902 skb->data_len = len - pos;
3903
3904 for (i = 0; i < nfrags; i++) {
3905 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3906
3907 if (pos + size > len) {
3908 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3909
3910 if (pos < len) {
3911 /* Split frag.
3912 * We have two variants in this case:
3913 * 1. Move the whole frag to the second
3914 * part, if possible. E.g. this
3915 * approach is mandatory for TUX,
3916 * where splitting is expensive.
3917 * 2. Split accurately. This is what we do here.
3918 */
3919 skb_frag_ref(skb, i);
3920 skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
3921 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3922 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3923 skb_shinfo(skb)->nr_frags++;
3924 }
3925 k++;
3926 } else
3927 skb_shinfo(skb)->nr_frags++;
3928 pos += size;
3929 }
3930 skb_shinfo(skb1)->nr_frags = k;
3931}
3932
3933/**
3934 * skb_split - Split fragmented skb to two parts at length len.
3935 * @skb: the buffer to split
3936 * @skb1: the buffer to receive the second part
3937 * @len: new length for skb
3938 */
3939void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3940{
3941 int pos = skb_headlen(skb);
3942 const int zc_flags = SKBFL_SHARED_FRAG | SKBFL_PURE_ZEROCOPY;
3943
3944 skb_zcopy_downgrade_managed(skb);
3945
3946 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & zc_flags;
3947 skb_zerocopy_clone(skb1, skb, 0);
3948 if (len < pos) /* Split line is inside header. */
3949 skb_split_inside_header(skb, skb1, len, pos);
3950 else /* Second chunk has no header, nothing to copy. */
3951 skb_split_no_header(skb, skb1, len, pos);
3952}
3953EXPORT_SYMBOL(skb_split);
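
/* When the cut in skb_split() lands inside the paged area, the frag that
 * straddles @len is duplicated and its offset/size adjusted on each side,
 * as done in skb_split_no_header() above. A minimal standalone sketch of
 * that boundary arithmetic on plain (offset, size) descriptors follows;
 * it is illustrative only and uses made-up values.
 */
#if 0 /* Standalone userspace sketch; not part of the kernel build. */
#include <stdio.h>

/* Hypothetical stand-in for skb_frag_t: just an offset and a size. */
struct frag { unsigned int off; unsigned int size; };

/* Split frags[] at byte position 'cut' relative to the start of the paged
 * data: frags entirely before the cut stay in the first half, the frag
 * containing the cut is duplicated with adjusted offset/size, and the rest
 * move to the second half.
 */
static int split_frags(const struct frag *frags, int nfrags, unsigned int cut,
		       struct frag *first, struct frag *second)
{
	unsigned int pos = 0;
	int i, n1 = 0, n2 = 0;

	for (i = 0; i < nfrags; i++) {
		unsigned int size = frags[i].size;

		if (pos + size <= cut) {		/* entirely before the cut */
			first[n1++] = frags[i];
		} else if (pos >= cut) {		/* entirely after the cut */
			second[n2++] = frags[i];
		} else {				/* straddles the cut */
			first[n1] = frags[i];
			first[n1++].size = cut - pos;
			second[n2] = frags[i];
			second[n2].off += cut - pos;
			second[n2++].size = size - (cut - pos);
		}
		pos += size;
	}
	return n1;	/* second-half count (n2) omitted for brevity */
}

int main(void)
{
	struct frag frags[] = { { 0, 1000 }, { 0, 1000 }, { 0, 500 } };
	struct frag a[4], b[4];
	int n1 = split_frags(frags, 3, 1500, a, b);

	/* Expect 2 frags in the first half, the boundary one trimmed to 500. */
	printf("first half: %d frags, boundary frag size %u\n",
	       n1, a[n1 - 1].size);
	return 0;
}
#endif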
3954
3955/* Shifting from/to a cloned skb is a no-go.
3956 *
3957 * Caller cannot keep skb_shinfo related pointers past calling here!
3958 */
3959static int skb_prepare_for_shift(struct sk_buff *skb)
3960{
3961 return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
3962}
3963
3964/**
3965 * skb_shift - Shifts paged data partially from skb to another
3966 * @tgt: buffer into which tail data gets added
3967 * @skb: buffer from which the paged data comes
3968 * @shiftlen: shift up to this many bytes
3969 *
3970 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3971 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3972 * It's up to caller to free skb if everything was shifted.
3973 *
3974 * If @tgt runs out of frags, the whole operation is aborted.
3975 *
3976 * The skb cannot contain anything but paged data, while tgt may also
3977 * have non-paged data.
3978 *
3979 * TODO: a full sized shift could be optimized, but that would need a
3980 * specialized skb free routine to handle frags without up-to-date nr_frags.
3981 */
3982int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3983{
3984 int from, to, merge, todo;
3985 skb_frag_t *fragfrom, *fragto;
3986
3987 BUG_ON(shiftlen > skb->len);
3988
3989 if (skb_headlen(skb))
3990 return 0;
3991 if (skb_zcopy(tgt) || skb_zcopy(skb))
3992 return 0;
3993
3994 todo = shiftlen;
3995 from = 0;
3996 to = skb_shinfo(tgt)->nr_frags;
3997 fragfrom = &skb_shinfo(skb)->frags[from];
3998
3999 /* Actual merge is delayed until the point when we know we can
4000 * commit all, so that we don't have to undo partial changes
4001 */
4002 if (!to ||
4003 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
4004 skb_frag_off(fragfrom))) {
4005 merge = -1;
4006 } else {
4007 merge = to - 1;
4008
4009 todo -= skb_frag_size(fragfrom);
4010 if (todo < 0) {
4011 if (skb_prepare_for_shift(skb) ||
4012 skb_prepare_for_shift(tgt))
4013 return 0;
4014
4015 /* All previous frag pointers might be stale! */
4016 fragfrom = &skb_shinfo(skb)->frags[from];
4017 fragto = &skb_shinfo(tgt)->frags[merge];
4018
4019 skb_frag_size_add(fragto, shiftlen);
4020 skb_frag_size_sub(fragfrom, shiftlen);
4021 skb_frag_off_add(fragfrom, shiftlen);
4022
4023 goto onlymerged;
4024 }
4025
4026 from++;
4027 }
4028
4029 /* Skip full, not-fitting skb to avoid expensive operations */
4030 if ((shiftlen == skb->len) &&
4031 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
4032 return 0;
4033
4034 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
4035 return 0;
4036
4037 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
4038 if (to == MAX_SKB_FRAGS)
4039 return 0;
4040
4041 fragfrom = &skb_shinfo(skb)->frags[from];
4042 fragto = &skb_shinfo(tgt)->frags[to];
4043
4044 if (todo >= skb_frag_size(fragfrom)) {
4045 *fragto = *fragfrom;
4046 todo -= skb_frag_size(fragfrom);
4047 from++;
4048 to++;
4049
4050 } else {
4051 __skb_frag_ref(fragfrom);
4052 skb_frag_page_copy(fragto, fragfrom);
4053 skb_frag_off_copy(fragto, fragfrom);
4054 skb_frag_size_set(fragto, todo);
4055
4056 skb_frag_off_add(fragfrom, todo);
4057 skb_frag_size_sub(fragfrom, todo);
4058 todo = 0;
4059
4060 to++;
4061 break;
4062 }
4063 }
4064
4065 /* Ready to "commit" this state change to tgt */
4066 skb_shinfo(tgt)->nr_frags = to;
4067
4068 if (merge >= 0) {
4069 fragfrom = &skb_shinfo(skb)->frags[0];
4070 fragto = &skb_shinfo(tgt)->frags[merge];
4071
4072 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
4073 __skb_frag_unref(fragfrom, skb->pp_recycle);
4074 }
4075
4076 /* Reposition in the original skb */
4077 to = 0;
4078 while (from < skb_shinfo(skb)->nr_frags)
4079 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
4080 skb_shinfo(skb)->nr_frags = to;
4081
4082 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
4083
4084onlymerged:
4085 /* Most likely the tgt won't ever need its checksum anymore, skb on
4086 * the other hand might need it if it needs to be resent
4087 */
4088 tgt->ip_summed = CHECKSUM_PARTIAL;
4089 skb->ip_summed = CHECKSUM_PARTIAL;
4090
4091 skb_len_add(skb, -shiftlen);
4092 skb_len_add(tgt, shiftlen);
4093
4094 return shiftlen;
4095}
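
/* skb_shift() first tries to merge the head of @skb's frag array into the
 * last frag of @tgt; that only works when both fragments reference the same
 * page and the incoming data starts exactly where the existing data ends
 * (the skb_can_coalesce() test). A minimal standalone sketch of that
 * adjacency check on made-up descriptors:
 */
#if 0 /* Standalone userspace sketch; not part of the kernel build. */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a page fragment: page id, offset, size. */
struct frag { int page; unsigned int off; unsigned int size; };

/* Roughly the condition checked before merging into the last frag of tgt. */
static bool can_coalesce(const struct frag *last, const struct frag *next)
{
	return last->page == next->page &&
	       last->off + last->size == next->off;
}

int main(void)
{
	struct frag tail = { .page = 7, .off = 0,   .size = 256 };
	struct frag adj  = { .page = 7, .off = 256, .size = 128 };
	struct frag gap  = { .page = 7, .off = 512, .size = 128 };

	printf("adjacent: %d, gap: %d\n",
	       can_coalesce(&tail, &adj), can_coalesce(&tail, &gap));
	return 0;
}
#endif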
4096
4097/**
4098 * skb_prepare_seq_read - Prepare a sequential read of skb data
4099 * @skb: the buffer to read
4100 * @from: lower offset of data to be read
4101 * @to: upper offset of data to be read
4102 * @st: state variable
4103 *
4104 * Initializes the specified state variable. Must be called before
4105 * invoking skb_seq_read() for the first time.
4106 */
4107void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
4108 unsigned int to, struct skb_seq_state *st)
4109{
4110 st->lower_offset = from;
4111 st->upper_offset = to;
4112 st->root_skb = st->cur_skb = skb;
4113 st->frag_idx = st->stepped_offset = 0;
4114 st->frag_data = NULL;
4115 st->frag_off = 0;
4116}
4117EXPORT_SYMBOL(skb_prepare_seq_read);
4118
4119/**
4120 * skb_seq_read - Sequentially read skb data
4121 * @consumed: number of bytes consumed by the caller so far
4122 * @data: destination pointer for data to be returned
4123 * @st: state variable
4124 *
4125 * Reads a block of skb data at @consumed relative to the
4126 * lower offset specified to skb_prepare_seq_read(). Assigns
4127 * the head of the data block to @data and returns the length
4128 * of the block or 0 if the end of the skb data or the upper
4129 * offset has been reached.
4130 *
4131 * The caller is not required to consume all of the data
4132 * returned, i.e. @consumed is typically set to the number
4133 * of bytes already consumed and the next call to
4134 * skb_seq_read() will return the remaining part of the block.
4135 *
4136 * Note 1: The size of each block of data returned can be arbitrary;
4137 * this limitation is the cost of zerocopy sequential
4138 * reads of potentially non-linear data.
4139 *
4140 * Note 2: Fragment lists within fragments are not implemented
4141 * at the moment, state->root_skb could be replaced with
4142 * a stack for this purpose.
4143 */
4144unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
4145 struct skb_seq_state *st)
4146{
4147 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
4148 skb_frag_t *frag;
4149
4150 if (unlikely(abs_offset >= st->upper_offset)) {
4151 if (st->frag_data) {
4152 kunmap_atomic(st->frag_data);
4153 st->frag_data = NULL;
4154 }
4155 return 0;
4156 }
4157
4158next_skb:
4159 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
4160
4161 if (abs_offset < block_limit && !st->frag_data) {
4162 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
4163 return block_limit - abs_offset;
4164 }
4165
4166 if (st->frag_idx == 0 && !st->frag_data)
4167 st->stepped_offset += skb_headlen(st->cur_skb);
4168
4169 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
4170 unsigned int pg_idx, pg_off, pg_sz;
4171
4172 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
4173
4174 pg_idx = 0;
4175 pg_off = skb_frag_off(frag);
4176 pg_sz = skb_frag_size(frag);
4177
4178 if (skb_frag_must_loop(skb_frag_page(frag))) {
4179 pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
4180 pg_off = offset_in_page(pg_off + st->frag_off);
4181 pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
4182 PAGE_SIZE - pg_off);
4183 }
4184
4185 block_limit = pg_sz + st->stepped_offset;
4186 if (abs_offset < block_limit) {
4187 if (!st->frag_data)
4188 st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
4189
4190 *data = (u8 *)st->frag_data + pg_off +
4191 (abs_offset - st->stepped_offset);
4192
4193 return block_limit - abs_offset;
4194 }
4195
4196 if (st->frag_data) {
4197 kunmap_atomic(st->frag_data);
4198 st->frag_data = NULL;
4199 }
4200
4201 st->stepped_offset += pg_sz;
4202 st->frag_off += pg_sz;
4203 if (st->frag_off == skb_frag_size(frag)) {
4204 st->frag_off = 0;
4205 st->frag_idx++;
4206 }
4207 }
4208
4209 if (st->frag_data) {
4210 kunmap_atomic(st->frag_data);
4211 st->frag_data = NULL;
4212 }
4213
4214 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
4215 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
4216 st->frag_idx = 0;
4217 goto next_skb;
4218 } else if (st->cur_skb->next) {
4219 st->cur_skb = st->cur_skb->next;
4220 st->frag_idx = 0;
4221 goto next_skb;
4222 }
4223
4224 return 0;
4225}
4226EXPORT_SYMBOL(skb_seq_read);
4227
4228/**
4229 * skb_abort_seq_read - Abort a sequential read of skb data
4230 * @st: state variable
4231 *
4232 * Must be called if the sequential read was abandoned before
4233 * skb_seq_read() returned 0.
4234 */
4235void skb_abort_seq_read(struct skb_seq_state *st)
4236{
4237 if (st->frag_data)
4238 kunmap_atomic(st->frag_data);
4239}
4240EXPORT_SYMBOL(skb_abort_seq_read);
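
/* Typical use of the sequential reader: prepare the state, loop over the
 * blocks handed back by skb_seq_read(), and abort explicitly only if the
 * walk stops before a 0 return. A minimal sketch, assuming a hypothetical
 * consumer process_block():
 */
#if 0 /* Usage sketch (hypothetical caller); not part of the kernel build. */
static void example_walk_skb(struct sk_buff *skb, unsigned int from,
			     unsigned int to)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, from, to, &st);

	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* data points at len contiguous bytes of skb payload. */
		if (process_block(data, len) < 0) {	/* hypothetical consumer */
			/* Stopping before skb_seq_read() returned 0, so the
			 * currently mapped fragment must be released.
			 */
			skb_abort_seq_read(&st);
			return;
		}
		consumed += len;
	}
	/* A 0 return means all mapping state has already been released. */
}
#endif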
4241
4242#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
4243
4244static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
4245 struct ts_config *conf,
4246 struct ts_state *state)
4247{
4248 return skb_seq_read(offset, text, TS_SKB_CB(state));
4249}
4250
4251static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
4252{
4253 skb_abort_seq_read(TS_SKB_CB(state));
4254}
4255
4256/**
4257 * skb_find_text - Find a text pattern in skb data
4258 * @skb: the buffer to look in
4259 * @from: search offset
4260 * @to: search limit
4261 * @config: textsearch configuration
4262 *
4263 * Finds a pattern in the skb data according to the specified
4264 * textsearch configuration. Use textsearch_next() to retrieve
4265 * subsequent occurrences of the pattern. Returns the offset
4266 * to the first occurrence or UINT_MAX if no match was found.
4267 */
4268unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
4269 unsigned int to, struct ts_config *config)
4270{
4271 unsigned int patlen = config->ops->get_pattern_len(config);
4272 struct ts_state state;
4273 unsigned int ret;
4274
4275 BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));
4276
4277 config->get_next_block = skb_ts_get_next_block;
4278 config->finish = skb_ts_finish;
4279
4280 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
4281
4282 ret = textsearch_find(config, &state);
4283 return (ret + patlen <= to - from ? ret : UINT_MAX);
4284}
4285EXPORT_SYMBOL(skb_find_text);
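
/* A caller builds a textsearch configuration (e.g. via textsearch_prepare()
 * from lib/textsearch) and hands it to skb_find_text(). A minimal sketch;
 * the surrounding function and the chosen pattern are illustrative only:
 */
#if 0 /* Usage sketch (hypothetical caller); not part of the kernel build. */
static unsigned int example_find_pattern(struct sk_buff *skb)
{
	struct ts_config *conf;
	unsigned int pos;

	/* "bm" selects the Boyer-Moore implementation provided by
	 * lib/textsearch; TS_AUTOLOAD lets it be loaded on demand.
	 */
	conf = textsearch_prepare("bm", "GET ", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf);

	textsearch_destroy(conf);
	return pos;	/* offset of the first match, or UINT_MAX */
}
#endif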
4286
4287int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
4288 int offset, size_t size, size_t max_frags)
4289{
4290 int i = skb_shinfo(skb)->nr_frags;
4291
4292 if (skb_can_coalesce(skb, i, page, offset)) {
4293 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
4294 } else if (i < max_frags) {
4295 skb_zcopy_downgrade_managed(skb);
4296 get_page(page);
4297 skb_fill_page_desc_noacc(skb, i, page, offset, size);
4298 } else {
4299 return -EMSGSIZE;
4300 }
4301
4302 return 0;
4303}
4304EXPORT_SYMBOL_GPL(skb_append_pagefrags);
4305
4306/**
4307 * skb_pull_rcsum - pull skb and update receive checksum
4308 * @skb: buffer to update
4309 * @len: length of data pulled
4310 *
4311 * This function performs an skb_pull on the packet and updates
4312 * the CHECKSUM_COMPLETE checksum. It should be used on
4313 * receive path processing instead of skb_pull unless you know
4314 * that the checksum difference is zero (e.g., a valid IP header)
4315 * or you are setting ip_summed to CHECKSUM_NONE.
4316 */
4317void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
4318{
4319 unsigned char *data = skb->data;
4320
4321 BUG_ON(len > skb->len);
4322 __skb_pull(skb, len);
4323 skb_postpull_rcsum(skb, data, len);
4324 return skb->data;
4325}
4326EXPORT_SYMBOL_GPL(skb_pull_rcsum);
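
/* With CHECKSUM_COMPLETE, skb->csum covers the whole payload, so pulling
 * bytes off the front means subtracting their ones-complement contribution,
 * which is what skb_postpull_rcsum() arranges. A minimal standalone sketch
 * of that arithmetic (even-length prefix assumed for simplicity):
 */
#if 0 /* Standalone userspace sketch; not part of the kernel build. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* 16-bit ones-complement sum over a buffer. */
static uint32_t csum_buf(const uint8_t *p, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)p[i] << 8 | p[i + 1];
	while (sum >> 16)			/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Ones-complement subtraction, roughly what csum_sub() does. */
static uint32_t csum_sub16(uint32_t sum, uint32_t part)
{
	sum += (~part) & 0xffff;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t pkt[] = { 0x45, 0x00, 0x01, 0x02, 0xde, 0xad, 0xbe, 0xef };
	size_t pull = 4;			/* bytes removed from the front */

	uint32_t full = csum_buf(pkt, sizeof(pkt));
	uint32_t head = csum_buf(pkt, pull);
	uint32_t rest = csum_buf(pkt + pull, sizeof(pkt) - pull);

	/* Subtracting the pulled bytes from the full sum yields the sum of
	 * what remains, so CHECKSUM_COMPLETE stays valid across the pull
	 * without rescanning the payload.
	 */
	printf("adjusted=%#x rest=%#x\n", csum_sub16(full, head), rest);
	return 0;
}
#endif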
4327
4328static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
4329{
4330 skb_frag_t head_frag;
4331 struct page *page;
4332
4333 page = virt_to_head_page(frag_skb->head);
4334 skb_frag_fill_page_desc(&head_frag, page, frag_skb->data -
4335 (unsigned char *)page_address(page),
4336 skb_headlen(frag_skb));
4337 return head_frag;
4338}
4339
4340struct sk_buff *skb_segment_list(struct sk_buff *skb,
4341 netdev_features_t features,
4342 unsigned int offset)
4343{
4344 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
4345 unsigned int tnl_hlen = skb_tnl_header_len(skb);
4346 unsigned int delta_truesize = 0;
4347 unsigned int delta_len = 0;
4348 struct sk_buff *tail = NULL;
4349 struct sk_buff *nskb, *tmp;
4350 int len_diff, err;
4351
4352 skb_push(skb, -skb_network_offset(skb) + offset);
4353
4354 /* Ensure the head is writeable before touching the shared info */
4355 err = skb_unclone(skb, GFP_ATOMIC);
4356 if (err)
4357 goto err_linearize;
4358
4359 skb_shinfo(skb)->frag_list = NULL;
4360
4361 while (list_skb) {
4362 nskb = list_skb;
4363 list_skb = list_skb->next;
4364
4365 err = 0;
4366 delta_truesize += nskb->truesize;
4367 if (skb_shared(nskb)) {
4368 tmp = skb_clone(nskb, GFP_ATOMIC);
4369 if (tmp) {
4370 consume_skb(nskb);
4371 nskb = tmp;
4372 err = skb_unclone(nskb, GFP_ATOMIC);
4373 } else {
4374 err = -ENOMEM;
4375 }
4376 }
4377
4378 if (!tail)
4379 skb->next = nskb;
4380 else
4381 tail->next = nskb;
4382
4383 if (unlikely(err)) {
4384 nskb->next = list_skb;
4385 goto err_linearize;
4386 }
4387
4388 tail = nskb;
4389
4390 delta_len += nskb->len;
4391
4392 skb_push(nskb, -skb_network_offset(nskb) + offset);
4393
4394 skb_release_head_state(nskb);
4395 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
4396 __copy_skb_header(nskb, skb);
4397
4398 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
4399 nskb->transport_header += len_diff;
4400 skb_copy_from_linear_data_offset(skb, -tnl_hlen,
4401 nskb->data - tnl_hlen,
4402 offset + tnl_hlen);
4403
4404 if (skb_needs_linearize(nskb, features) &&
4405 __skb_linearize(nskb))
4406 goto err_linearize;
4407 }
4408
4409 skb->truesize = skb->truesize - delta_truesize;
4410 skb->data_len = skb->data_len - delta_len;
4411 skb->len = skb->len - delta_len;
4412
4413 skb_gso_reset(skb);
4414
4415 skb->prev = tail;
4416
4417 if (skb_needs_linearize(skb, features) &&
4418 __skb_linearize(skb))
4419 goto err_linearize;
4420
4421 skb_get(skb);
4422
4423 return skb;
4424
4425err_linearize:
4426 kfree_skb_list(skb->next);
4427 skb->next = NULL;
4428 return ERR_PTR(-ENOMEM);
4429}
4430EXPORT_SYMBOL_GPL(skb_segment_list);
4431
4432/**
4433 * skb_segment - Perform protocol segmentation on skb.
4434 * @head_skb: buffer to segment
4435 * @features: features for the output path (see dev->features)
4436 *
4437 * This function performs segmentation on the given skb. It returns
4438 * a pointer to the first in a list of new skbs for the segments.
4439 * In case of error it returns ERR_PTR(err).
4440 */
4441struct sk_buff *skb_segment(struct sk_buff *head_skb,
4442 netdev_features_t features)
4443{
4444 struct sk_buff *segs = NULL;
4445 struct sk_buff *tail = NULL;
4446 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
4447 unsigned int mss = skb_shinfo(head_skb)->gso_size;
4448 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
4449 unsigned int offset = doffset;
4450 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
4451 unsigned int partial_segs = 0;
4452 unsigned int headroom;
4453 unsigned int len = head_skb->len;
4454 struct sk_buff *frag_skb;
4455 skb_frag_t *frag;
4456 __be16 proto;
4457 bool csum, sg;
4458 int err = -ENOMEM;
4459 int i = 0;
4460 int nfrags, pos;
4461
4462 if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
4463 mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
4464 struct sk_buff *check_skb;
4465
4466 for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
4467 if (skb_headlen(check_skb) && !check_skb->head_frag) {
4468 /* gso_size is untrusted, and we have a frag_list with
4469 * a linear non head_frag item.
4470 *
4471 * If head_skb's headlen does not fit requested gso_size,
4472 * it means that the frag_list members do NOT terminate
4473 * on exact gso_size boundaries. Hence we cannot perform
4474 * skb_frag_t page sharing. Therefore we must fallback to
4475 * copying the frag_list skbs; we do so by disabling SG.
4476 */
4477 features &= ~NETIF_F_SG;
4478 break;
4479 }
4480 }
4481 }
4482
4483 __skb_push(head_skb, doffset);
4484 proto = skb_network_protocol(head_skb, NULL);
4485 if (unlikely(!proto))
4486 return ERR_PTR(-EINVAL);
4487
4488 sg = !!(features & NETIF_F_SG);
4489 csum = !!can_checksum_protocol(features, proto);
4490
4491 if (sg && csum && (mss != GSO_BY_FRAGS)) {
4492 if (!(features & NETIF_F_GSO_PARTIAL)) {
4493 struct sk_buff *iter;
4494 unsigned int frag_len;
4495
4496 if (!list_skb ||
4497 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
4498 goto normal;
4499
4500 /* If we get here then all the required
4501 * GSO features except frag_list are supported.
4502 * Try to split the SKB into multiple GSO SKBs
4503 * with no frag_list.
4504 * Currently we can do that only when the buffers don't
4505 * have a linear part and all the buffers except
4506 * the last are of the same length.
4507 */
4508 frag_len = list_skb->len;
4509 skb_walk_frags(head_skb, iter) {
4510 if (frag_len != iter->len && iter->next)
4511 goto normal;
4512 if (skb_headlen(iter) && !iter->head_frag)
4513 goto normal;
4514
4515 len -= iter->len;
4516 }
4517
4518 if (len != frag_len)
4519 goto normal;
4520 }
4521
4522 /* GSO partial only requires that we trim off any excess that
4523 * doesn't fit into an MSS sized block, so take care of that
4524 * now.
4525 * Cap len to not accidentally hit GSO_BY_FRAGS.
4526 */
4527 partial_segs = min(len, GSO_BY_FRAGS - 1) / mss;
4528 if (partial_segs > 1)
4529 mss *= partial_segs;
4530 else
4531 partial_segs = 0;
4532 }
4533
4534normal:
4535 headroom = skb_headroom(head_skb);
4536 pos = skb_headlen(head_skb);
4537
4538 if (skb_orphan_frags(head_skb, GFP_ATOMIC))
4539 return ERR_PTR(-ENOMEM);
4540
4541 nfrags = skb_shinfo(head_skb)->nr_frags;
4542 frag = skb_shinfo(head_skb)->frags;
4543 frag_skb = head_skb;
4544
4545 do {
4546 struct sk_buff *nskb;
4547 skb_frag_t *nskb_frag;
4548 int hsize;
4549 int size;
4550
4551 if (unlikely(mss == GSO_BY_FRAGS)) {
4552 len = list_skb->len;
4553 } else {
4554 len = head_skb->len - offset;
4555 if (len > mss)
4556 len = mss;
4557 }
4558
4559 hsize = skb_headlen(head_skb) - offset;
4560
4561 if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) &&
4562 (skb_headlen(list_skb) == len || sg)) {
4563 BUG_ON(skb_headlen(list_skb) > len);
4564
4565 nskb = skb_clone(list_skb, GFP_ATOMIC);
4566 if (unlikely(!nskb))
4567 goto err;
4568
4569 i = 0;
4570 nfrags = skb_shinfo(list_skb)->nr_frags;
4571 frag = skb_shinfo(list_skb)->frags;
4572 frag_skb = list_skb;
4573 pos += skb_headlen(list_skb);
4574
4575 while (pos < offset + len) {
4576 BUG_ON(i >= nfrags);
4577
4578 size = skb_frag_size(frag);
4579 if (pos + size > offset + len)
4580 break;
4581
4582 i++;
4583 pos += size;
4584 frag++;
4585 }
4586
4587 list_skb = list_skb->next;
4588
4589 if (unlikely(pskb_trim(nskb, len))) {
4590 kfree_skb(nskb);
4591 goto err;
4592 }
4593
4594 hsize = skb_end_offset(nskb);
4595 if (skb_cow_head(nskb, doffset + headroom)) {
4596 kfree_skb(nskb);
4597 goto err;
4598 }
4599
4600 nskb->truesize += skb_end_offset(nskb) - hsize;
4601 skb_release_head_state(nskb);
4602 __skb_push(nskb, doffset);
4603 } else {
4604 if (hsize < 0)
4605 hsize = 0;
4606 if (hsize > len || !sg)
4607 hsize = len;
4608
4609 nskb = __alloc_skb(hsize + doffset + headroom,
4610 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
4611 NUMA_NO_NODE);
4612
4613 if (unlikely(!nskb))
4614 goto err;
4615
4616 skb_reserve(nskb, headroom);
4617 __skb_put(nskb, doffset);
4618 }
4619
4620 if (segs)
4621 tail->next = nskb;
4622 else
4623 segs = nskb;
4624 tail = nskb;
4625
4626 __copy_skb_header(nskb, head_skb);
4627
4628 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
4629 skb_reset_mac_len(nskb);
4630
4631 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
4632 nskb->data - tnl_hlen,
4633 doffset + tnl_hlen);
4634
4635 if (nskb->len == len + doffset)
4636 goto perform_csum_check;
4637
4638 if (!sg) {
4639 if (!csum) {
4640 if (!nskb->remcsum_offload)
4641 nskb->ip_summed = CHECKSUM_NONE;
4642 SKB_GSO_CB(nskb)->csum =
4643 skb_copy_and_csum_bits(head_skb, offset,
4644 skb_put(nskb,
4645 len),
4646 len);
4647 SKB_GSO_CB(nskb)->csum_start =
4648 skb_headroom(nskb) + doffset;
4649 } else {
4650 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
4651 goto err;
4652 }
4653 continue;
4654 }
4655
4656 nskb_frag = skb_shinfo(nskb)->frags;
4657
4658 skb_copy_from_linear_data_offset(head_skb, offset,
4659 skb_put(nskb, hsize), hsize);
4660
4661 skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
4662 SKBFL_SHARED_FRAG;
4663
4664 if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
4665 goto err;
4666
4667 while (pos < offset + len) {
4668 if (i >= nfrags) {
4669 if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
4670 skb_zerocopy_clone(nskb, list_skb,
4671 GFP_ATOMIC))
4672 goto err;
4673
4674 i = 0;
4675 nfrags = skb_shinfo(list_skb)->nr_frags;
4676 frag = skb_shinfo(list_skb)->frags;
4677 frag_skb = list_skb;
4678 if (!skb_headlen(list_skb)) {
4679 BUG_ON(!nfrags);
4680 } else {
4681 BUG_ON(!list_skb->head_frag);
4682
4683 /* to make room for head_frag. */
4684 i--;
4685 frag--;
4686 }
4687
4688 list_skb = list_skb->next;
4689 }
4690
4691 if (unlikely(skb_shinfo(nskb)->nr_frags >=
4692 MAX_SKB_FRAGS)) {
4693 net_warn_ratelimited(
4694 "skb_segment: too many frags: %u %u\n",
4695 pos, mss);
4696 err = -EINVAL;
4697 goto err;
4698 }
4699
4700 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
4701 __skb_frag_ref(nskb_frag);
4702 size = skb_frag_size(nskb_frag);
4703
4704 if (pos < offset) {
4705 skb_frag_off_add(nskb_frag, offset - pos);
4706 skb_frag_size_sub(nskb_frag, offset - pos);
4707 }
4708
4709 skb_shinfo(nskb)->nr_frags++;
4710
4711 if (pos + size <= offset + len) {
4712 i++;
4713 frag++;
4714 pos += size;
4715 } else {
4716 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
4717 goto skip_fraglist;
4718 }
4719
4720 nskb_frag++;
4721 }
4722
4723skip_fraglist:
4724 nskb->data_len = len - hsize;
4725 nskb->len += nskb->data_len;
4726 nskb->truesize += nskb->data_len;
4727
4728perform_csum_check:
4729 if (!csum) {
4730 if (skb_has_shared_frag(nskb) &&
4731 __skb_linearize(nskb))
4732 goto err;
4733
4734 if (!nskb->remcsum_offload)
4735 nskb->ip_summed = CHECKSUM_NONE;
4736 SKB_GSO_CB(nskb)->csum =
4737 skb_checksum(nskb, doffset,
4738 nskb->len - doffset, 0);
4739 SKB_GSO_CB(nskb)->csum_start =
4740 skb_headroom(nskb) + doffset;
4741 }
4742 } while ((offset += len) < head_skb->len);
4743
4744 /* Some callers want to get the end of the list.
4745 * Put it in segs->prev to avoid walking the list.
4746 * (see validate_xmit_skb_list() for example)
4747 */
4748 segs->prev = tail;
4749
4750 if (partial_segs) {
4751 struct sk_buff *iter;
4752 int type = skb_shinfo(head_skb)->gso_type;
4753 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
4754
4755 /* Update type to add partial and then remove dodgy if set */
4756 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
4757 type &= ~SKB_GSO_DODGY;
4758
4759 /* Update GSO info and prepare to start updating headers on
4760 * our way back down the stack of protocols.
4761 */
4762 for (iter = segs; iter; iter = iter->next) {
4763 skb_shinfo(iter)->gso_size = gso_size;
4764 skb_shinfo(iter)->gso_segs = partial_segs;
4765 skb_shinfo(iter)->gso_type = type;
4766 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
4767 }
4768
4769 if (tail->len - doffset <= gso_size)
4770 skb_shinfo(tail)->gso_size = 0;
4771 else if (tail != segs)
4772 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
4773 }
4774
4775 /* The following permits correct backpressure for protocols
4776 * using skb_set_owner_w().
4777 * The idea is to transfer ownership from head_skb to the last segment.
4778 */
4779 if (head_skb->destructor == sock_wfree) {
4780 swap(tail->truesize, head_skb->truesize);
4781 swap(tail->destructor, head_skb->destructor);
4782 swap(tail->sk, head_skb->sk);
4783 }
4784 return segs;
4785
4786err:
4787 kfree_skb_list(segs);
4788 return ERR_PTR(err);
4789}
4790EXPORT_SYMBOL_GPL(skb_segment);
4791
4792#ifdef CONFIG_SKB_EXTENSIONS
4793#define SKB_EXT_ALIGN_VALUE 8
4794#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
4795
4796static const u8 skb_ext_type_len[] = {
4797#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4798 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4799#endif
4800#ifdef CONFIG_XFRM
4801 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
4802#endif
4803#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4804 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
4805#endif
4806#if IS_ENABLED(CONFIG_MPTCP)
4807 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
4808#endif
4809#if IS_ENABLED(CONFIG_MCTP_FLOWS)
4810 [SKB_EXT_MCTP] = SKB_EXT_CHUNKSIZEOF(struct mctp_flow),
4811#endif
4812};
4813
4814static __always_inline unsigned int skb_ext_total_length(void)
4815{
4816 unsigned int l = SKB_EXT_CHUNKSIZEOF(struct skb_ext);
4817 int i;
4818
4819 for (i = 0; i < ARRAY_SIZE(skb_ext_type_len); i++)
4820 l += skb_ext_type_len[i];
4821
4822 return l;
4823}
4824
4825static void skb_extensions_init(void)
4826{
4827 BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4828 BUILD_BUG_ON(skb_ext_total_length() > 255);
4829
4830 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4831 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4832 0,
4833 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4834 NULL);
4835}
4836#else
4837static void skb_extensions_init(void) {}
4838#endif
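
/* Extension sizes are accounted in 8-byte chunks (SKB_EXT_ALIGN_VALUE), and
 * each extension starts at the running total of the chunks in front of it.
 * A minimal standalone sketch of that arithmetic with made-up sizes (the
 * 16-byte header below is only a placeholder for struct skb_ext):
 */
#if 0 /* Standalone userspace sketch; not part of the kernel build. */
#include <stdio.h>

#define ALIGN_VALUE	8
#define CHUNKS(sz)	(((sz) + ALIGN_VALUE - 1) / ALIGN_VALUE)

int main(void)
{
	unsigned int sizes[] = { 40, 12, 64 };	/* made-up extension sizes */
	unsigned int off = CHUNKS(16);		/* placeholder header size */
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		printf("ext %u starts at byte %u and takes %u chunks\n",
		       i, off * ALIGN_VALUE, CHUNKS(sizes[i]));
		off += CHUNKS(sizes[i]);
	}
	printf("total allocation: %u bytes\n", off * ALIGN_VALUE);
	return 0;
}
#endif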
4839
4840/* The SKB kmem_cache slab is critical for network performance. Never
4841 * merge/alias the slab with similar sized objects. This avoids fragmentation
4842 * that hurts performance of kmem_cache_{alloc,free}_bulk APIs.
4843 */
4844#ifndef CONFIG_SLUB_TINY
4845#define FLAG_SKB_NO_MERGE SLAB_NO_MERGE
4846#else /* CONFIG_SLUB_TINY - simple loop in kmem_cache_alloc_bulk */
4847#define FLAG_SKB_NO_MERGE 0
4848#endif
4849
4850void __init skb_init(void)
4851{
4852 skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
4853 sizeof(struct sk_buff),
4854 0,
4855 SLAB_HWCACHE_ALIGN|SLAB_PANIC|
4856 FLAG_SKB_NO_MERGE,
4857 offsetof(struct sk_buff, cb),
4858 sizeof_field(struct sk_buff, cb),
4859 NULL);
4860 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
4861 sizeof(struct sk_buff_fclones),
4862 0,
4863 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4864 NULL);
4865 /* usercopy should only access first SKB_SMALL_HEAD_HEADROOM bytes.
4866 * struct skb_shared_info is located at the end of skb->head,
4867 * and should not be copied to/from user.
4868 */
4869 skb_small_head_cache = kmem_cache_create_usercopy("skbuff_small_head",
4870 SKB_SMALL_HEAD_CACHE_SIZE,
4871 0,
4872 SLAB_HWCACHE_ALIGN | SLAB_PANIC,
4873 0,
4874 SKB_SMALL_HEAD_HEADROOM,
4875 NULL);
4876 skb_extensions_init();
4877}
4878
4879static int
4880__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4881 unsigned int recursion_level)
4882{
4883 int start = skb_headlen(skb);
4884 int i, copy = start - offset;
4885 struct sk_buff *frag_iter;
4886 int elt = 0;
4887
4888 if (unlikely(recursion_level >= 24))
4889 return -EMSGSIZE;
4890
4891 if (copy > 0) {
4892 if (copy > len)
4893 copy = len;
4894 sg_set_buf(sg, skb->data + offset, copy);
4895 elt++;
4896 if ((len -= copy) == 0)
4897 return elt;
4898 offset += copy;
4899 }
4900
4901 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4902 int end;
4903
4904 WARN_ON(start > offset + len);
4905
4906 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4907 if ((copy = end - offset) > 0) {
4908 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4909 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4910 return -EMSGSIZE;
4911
4912 if (copy > len)
4913 copy = len;
4914 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4915 skb_frag_off(frag) + offset - start);
4916 elt++;
4917 if (!(len -= copy))
4918 return elt;
4919 offset += copy;
4920 }
4921 start = end;
4922 }
4923
4924 skb_walk_frags(skb, frag_iter) {
4925 int end, ret;
4926
4927 WARN_ON(start > offset + len);
4928
4929 end = start + frag_iter->len;
4930 if ((copy = end - offset) > 0) {
4931 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4932 return -EMSGSIZE;
4933
4934 if (copy > len)
4935 copy = len;
4936 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
4937 copy, recursion_level + 1);
4938 if (unlikely(ret < 0))
4939 return ret;
4940 elt += ret;
4941 if ((len -= copy) == 0)
4942 return elt;
4943 offset += copy;
4944 }
4945 start = end;
4946 }
4947 BUG_ON(len);
4948 return elt;
4949}
4950
4951/**
4952 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4953 * @skb: Socket buffer containing the buffers to be mapped
4954 * @sg: The scatter-gather list to map into
4955 * @offset: The offset into the buffer's contents to start mapping
4956 * @len: Length of buffer space to be mapped
4957 *
4958 * Fill the specified scatter-gather list with mappings/pointers into a
4959 * region of the buffer space attached to a socket buffer. Returns either
4960 * the number of scatterlist items used, or -EMSGSIZE if the contents
4961 * could not fit.
4962 */
4963int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4964{
4965 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4966
4967 if (nsg <= 0)
4968 return nsg;
4969
4970 sg_mark_end(&sg[nsg - 1]);
4971
4972 return nsg;
4973}
4974EXPORT_SYMBOL_GPL(skb_to_sgvec);
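
/* A common use is mapping an skb into a scatterlist before handing it to
 * crypto or DMA code. A minimal sketch; the caller and the fixed-size table
 * are illustrative only (real callers size the table from skb_cow_data()
 * or similar):
 */
#if 0 /* Usage sketch (hypothetical caller); not part of the kernel build. */
static int example_map_skb(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));

	/* Map the whole payload; -EMSGSIZE means it did not fit the table. */
	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg <= 0)
		return nsg;

	/* sg[0..nsg-1] now reference the head and page fragments, and the
	 * last entry has already been marked as the end of the list.
	 */
	return nsg;
}
#endif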
4975
4976/* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
4977 * given sglist without marking the sg entry that holds the last skb data as
4978 * the end. So the caller can manipulate the sg list at will when appending
4979 * new data after the first call, without calling sg_unmark_end to extend it.
4980 *
4981 * Scenario to use skb_to_sgvec_nomark:
4982 * 1. sg_init_table
4983 * 2. skb_to_sgvec_nomark(payload1)
4984 * 3. skb_to_sgvec_nomark(payload2)
4985 *
4986 * This is equivalent to:
4987 * 1. sg_init_table
4988 * 2. skb_to_sgvec(payload1)
4989 * 3. sg_unmark_end
4990 * 4. skb_to_sgvec(payload2)
4991 *
4992 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
4993 * is preferable.
4994 */
4995int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4996 int offset, int len)
4997{
4998 return __skb_to_sgvec(skb, sg, offset, len, 0);
4999}
5000EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
5001
5002
5003
5004/**
5005 * skb_cow_data - Check that a socket buffer's data buffers are writable
5006 * @skb: The socket buffer to check.
5007 * @tailbits: Amount of trailing space to be added
5008 * @trailer: Returned pointer to the skb where the @tailbits space begins
5009 *
5010 * Make sure that the data buffers attached to a socket buffer are
5011 * writable. If they are not, private copies are made of the data buffers
5012 * and the socket buffer is set to use these instead.
5013 *
5014 * If @tailbits is given, make sure that there is space to write @tailbits
5015 * bytes of data beyond current end of socket buffer. @trailer will be
5016 * set to point to the skb in which this space begins.
5017 *
5018 * The number of scatterlist elements required to completely map the
5019 * COW'd and extended socket buffer will be returned.
5020 */
5021int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
5022{
5023 int copyflag;
5024 int elt;
5025 struct sk_buff *skb1, **skb_p;
5026
5027 /* If skb is cloned or its head is paged, reallocate
5028 * head pulling out all the pages (pages are considered not writable
5029 * at the moment even if they are anonymous).
5030 */
5031 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
5032 !__pskb_pull_tail(skb, __skb_pagelen(skb)))
5033 return -ENOMEM;
5034
5035 /* Easy case. Most of packets will go this way. */
5036 if (!skb_has_frag_list(skb)) {
5037 /* A little trouble: not enough space for the trailer.
5038 * This should not happen when the stack is tuned to generate
5039 * good frames. On a miss we reallocate and reserve even more
5040 * space; 128 bytes is fair. */
5041
5042 if (skb_tailroom(skb) < tailbits &&
5043 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
5044 return -ENOMEM;
5045
5046 /* Voila! */
5047 *trailer = skb;
5048 return 1;
5049 }
5050
5051 /* Misery. We are in trouble and have to mince the fragments... */
5052
5053 elt = 1;
5054 skb_p = &skb_shinfo(skb)->frag_list;
5055 copyflag = 0;
5056
5057 while ((skb1 = *skb_p) != NULL) {
5058 int ntail = 0;
5059
5060 /* The fragment was partially pulled by someone;
5061 * this can happen on input. Copy it and everything
5062 * after it. */
5063
5064 if (skb_shared(skb1))
5065 copyflag = 1;
5066
5067 /* If the skb is the last, worry about trailer. */
5068
5069 if (skb1->next == NULL && tailbits) {
5070 if (skb_shinfo(skb1)->nr_frags ||
5071 skb_has_frag_list(skb1) ||
5072 skb_tailroom(skb1) < tailbits)
5073 ntail = tailbits + 128;
5074 }
5075
5076 if (copyflag ||
5077 skb_cloned(skb1) ||
5078 ntail ||
5079 skb_shinfo(skb1)->nr_frags ||
5080 skb_has_frag_list(skb1)) {
5081 struct sk_buff *skb2;
5082
5083 /* Unfortunately we have to make a private copy... */
5084 if (ntail == 0)
5085 skb2 = skb_copy(skb1, GFP_ATOMIC);
5086 else
5087 skb2 = skb_copy_expand(skb1,
5088 skb_headroom(skb1),
5089 ntail,
5090 GFP_ATOMIC);
5091 if (unlikely(skb2 == NULL))
5092 return -ENOMEM;
5093
5094 if (skb1->sk)
5095 skb_set_owner_w(skb2, skb1->sk);
5096
5097 /* Link the new skb and drop the old one. */
5099
5100 skb2->next = skb1->next;
5101 *skb_p = skb2;
5102 kfree_skb(skb1);
5103 skb1 = skb2;
5104 }
5105 elt++;
5106 *trailer = skb1;
5107 skb_p = &skb1->next;
5108 }
5109
5110 return elt;
5111}
5112EXPORT_SYMBOL_GPL(skb_cow_data);
5113
5114static void sock_rmem_free(struct sk_buff *skb)
5115{
5116 struct sock *sk = skb->sk;
5117
5118 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
5119}
5120
5121static void skb_set_err_queue(struct sk_buff *skb)
5122{
5123 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
5124 * So, it is safe to (mis)use it to mark skbs on the error queue.
5125 */
5126 skb->pkt_type = PACKET_OUTGOING;
5127 BUILD_BUG_ON(PACKET_OUTGOING == 0);
5128}
5129
5130/*
5131 * Note: We don't memory-charge error packets (no sk_forward_alloc changes).
5132 */
5133int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
5134{
5135 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
5136 (unsigned int)READ_ONCE(sk->sk_rcvbuf))
5137 return -ENOMEM;
5138
5139 skb_orphan(skb);
5140 skb->sk = sk;
5141 skb->destructor = sock_rmem_free;
5142 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
5143 skb_set_err_queue(skb);
5144
5145 /* before exiting rcu section, make sure dst is refcounted */
5146 skb_dst_force(skb);
5147
5148 skb_queue_tail(&sk->sk_error_queue, skb);
5149 if (!sock_flag(sk, SOCK_DEAD))
5150 sk_error_report(sk);
5151 return 0;
5152}
5153EXPORT_SYMBOL(sock_queue_err_skb);
5154
5155static bool is_icmp_err_skb(const struct sk_buff *skb)
5156{
5157 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
5158 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
5159}
5160
5161struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
5162{
5163 struct sk_buff_head *q = &sk->sk_error_queue;
5164 struct sk_buff *skb, *skb_next = NULL;
5165 bool icmp_next = false;
5166 unsigned long flags;
5167
5168 if (skb_queue_empty_lockless(q))
5169 return NULL;
5170
5171 spin_lock_irqsave(&q->lock, flags);
5172 skb = __skb_dequeue(q);
5173 if (skb && (skb_next = skb_peek(q))) {
5174 icmp_next = is_icmp_err_skb(skb_next);
5175 if (icmp_next)
5176 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
5177 }
5178 spin_unlock_irqrestore(&q->lock, flags);
5179
5180 if (is_icmp_err_skb(skb) && !icmp_next)
5181 sk->sk_err = 0;
5182
5183 if (skb_next)
5184 sk_error_report(sk);
5185
5186 return skb;
5187}
5188EXPORT_SYMBOL(sock_dequeue_err_skb);
5189
5190/**
5191 * skb_clone_sk - create clone of skb, and take reference to socket
5192 * @skb: the skb to clone
5193 *
5194 * This function creates a clone of a buffer that holds a reference on
5195 * sk_refcnt. Buffers created via this function are meant to be
5196 * returned using sock_queue_err_skb, or freed via kfree_skb.
5197 *
5198 * When passing buffers allocated with this function to sock_queue_err_skb
5199 * it is necessary to wrap the call with sock_hold/sock_put in order to
5200 * prevent the socket from being released prior to being enqueued on
5201 * the sk_error_queue.
5202 */
5203struct sk_buff *skb_clone_sk(struct sk_buff *skb)
5204{
5205 struct sock *sk = skb->sk;
5206 struct sk_buff *clone;
5207
5208 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
5209 return NULL;
5210
5211 clone = skb_clone(skb, GFP_ATOMIC);
5212 if (!clone) {
5213 sock_put(sk);
5214 return NULL;
5215 }
5216
5217 clone->sk = sk;
5218 clone->destructor = sock_efree;
5219
5220 return clone;
5221}
5222EXPORT_SYMBOL(skb_clone_sk);
5223
5224static void __skb_complete_tx_timestamp(struct sk_buff *skb,
5225 struct sock *sk,
5226 int tstype,
5227 bool opt_stats)
5228{
5229 struct sock_exterr_skb *serr;
5230 int err;
5231
5232 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
5233
5234 serr = SKB_EXT_ERR(skb);
5235 memset(serr, 0, sizeof(*serr));
5236 serr->ee.ee_errno = ENOMSG;
5237 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
5238 serr->ee.ee_info = tstype;
5239 serr->opt_stats = opt_stats;
5240 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
5241 if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
5242 serr->ee.ee_data = skb_shinfo(skb)->tskey;
5243 if (sk_is_tcp(sk))
5244 serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
5245 }
5246
5247 err = sock_queue_err_skb(sk, skb);
5248
5249 if (err)
5250 kfree_skb(skb);
5251}
5252
5253static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
5254{
5255 bool ret;
5256
5257 if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
5258 return true;
5259
5260 read_lock_bh(&sk->sk_callback_lock);
5261 ret = sk->sk_socket && sk->sk_socket->file &&
5262 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
5263 read_unlock_bh(&sk->sk_callback_lock);
5264 return ret;
5265}
5266
5267void skb_complete_tx_timestamp(struct sk_buff *skb,
5268 struct skb_shared_hwtstamps *hwtstamps)
5269{
5270 struct sock *sk = skb->sk;
5271
5272 if (!skb_may_tx_timestamp(sk, false))
5273 goto err;
5274
5275 /* Take a reference to prevent skb_orphan() from freeing the socket,
5276 * but only if the socket refcount is not zero.
5277 */
5278 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
5279 *skb_hwtstamps(skb) = *hwtstamps;
5280 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
5281 sock_put(sk);
5282 return;
5283 }
5284
5285err:
5286 kfree_skb(skb);
5287}
5288EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
5289
5290void __skb_tstamp_tx(struct sk_buff *orig_skb,
5291 const struct sk_buff *ack_skb,
5292 struct skb_shared_hwtstamps *hwtstamps,
5293 struct sock *sk, int tstype)
5294{
5295 struct sk_buff *skb;
5296 bool tsonly, opt_stats = false;
5297 u32 tsflags;
5298
5299 if (!sk)
5300 return;
5301
5302 tsflags = READ_ONCE(sk->sk_tsflags);
5303 if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
5304 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
5305 return;
5306
5307 tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
5308 if (!skb_may_tx_timestamp(sk, tsonly))
5309 return;
5310
5311 if (tsonly) {
5312#ifdef CONFIG_INET
5313 if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
5314 sk_is_tcp(sk)) {
5315 skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
5316 ack_skb);
5317 opt_stats = true;
5318 } else
5319#endif
5320 skb = alloc_skb(0, GFP_ATOMIC);
5321 } else {
5322 skb = skb_clone(orig_skb, GFP_ATOMIC);
5323
5324 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) {
5325 kfree_skb(skb);
5326 return;
5327 }
5328 }
5329 if (!skb)
5330 return;
5331
5332 if (tsonly) {
5333 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
5334 SKBTX_ANY_TSTAMP;
5335 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
5336 }
5337
5338 if (hwtstamps)
5339 *skb_hwtstamps(skb) = *hwtstamps;
5340 else
5341 __net_timestamp(skb);
5342
5343 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
5344}
5345EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
5346
5347void skb_tstamp_tx(struct sk_buff *orig_skb,
5348 struct skb_shared_hwtstamps *hwtstamps)
5349{
5350 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk,
5351 SCM_TSTAMP_SND);
5352}
5353EXPORT_SYMBOL_GPL(skb_tstamp_tx);
5354
5355#ifdef CONFIG_WIRELESS
5356void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
5357{
5358 struct sock *sk = skb->sk;
5359 struct sock_exterr_skb *serr;
5360 int err = 1;
5361
5362 skb->wifi_acked_valid = 1;
5363 skb->wifi_acked = acked;
5364
5365 serr = SKB_EXT_ERR(skb);
5366 memset(serr, 0, sizeof(*serr));
5367 serr->ee.ee_errno = ENOMSG;
5368 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
5369
5370 /* Take a reference to prevent skb_orphan() from freeing the socket,
5371 * but only if the socket refcount is not zero.
5372 */
5373 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
5374 err = sock_queue_err_skb(sk, skb);
5375 sock_put(sk);
5376 }
5377 if (err)
5378 kfree_skb(skb);
5379}
5380EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
5381#endif /* CONFIG_WIRELESS */
5382
5383/**
5384 * skb_partial_csum_set - set up and verify partial csum values for packet
5385 * @skb: the skb to set
5386 * @start: the number of bytes after skb->data to start checksumming.
5387 * @off: the offset from start to place the checksum.
5388 *
5389 * For untrusted partially-checksummed packets, we need to make sure the values
5390 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
5391 *
5392 * This function checks and sets those values and skb->ip_summed: if this
5393 * returns false you should drop the packet.
5394 */
5395bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
5396{
5397 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
5398 u32 csum_start = skb_headroom(skb) + (u32)start;
5399
5400 if (unlikely(csum_start >= U16_MAX || csum_end > skb_headlen(skb))) {
5401 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
5402 start, off, skb_headroom(skb), skb_headlen(skb));
5403 return false;
5404 }
5405 skb->ip_summed = CHECKSUM_PARTIAL;
5406 skb->csum_start = csum_start;
5407 skb->csum_offset = off;
5408 skb->transport_header = csum_start;
5409 return true;
5410}
5411EXPORT_SYMBOL_GPL(skb_partial_csum_set);
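
/* With CHECKSUM_PARTIAL the stack records where checksumming starts
 * (csum_start) and where the 16-bit result goes (csum_offset past that
 * start); the transmitting device finishes the job. A minimal standalone
 * sketch of that device-side step over a plain buffer (offsets and the
 * pre-seeded checksum field are made up):
 */
#if 0 /* Standalone userspace sketch; not part of the kernel build. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fold a 32-bit accumulator into the final ones-complement checksum. */
static uint16_t csum_finish(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Checksum from csum_start to the end of the packet and store the result at
 * csum_start + csum_offset; the field is assumed to already hold the
 * pseudo-header seed, as the stack arranges for CHECKSUM_PARTIAL.
 */
static void finish_partial_csum(uint8_t *pkt, size_t len,
				size_t csum_start, size_t csum_offset)
{
	uint32_t sum = 0;
	uint16_t csum;
	size_t i;

	for (i = csum_start; i + 1 < len; i += 2)
		sum += (uint32_t)pkt[i] << 8 | pkt[i + 1];
	if ((len - csum_start) & 1)
		sum += (uint32_t)pkt[len - 1] << 8;

	csum = csum_finish(sum);
	pkt[csum_start + csum_offset] = csum >> 8;
	pkt[csum_start + csum_offset + 1] = csum & 0xff;
}

int main(void)
{
	uint8_t pkt[64] = { 0 };

	memset(pkt + 34, 0xab, 20);		/* pretend L4 header + data */
	finish_partial_csum(pkt, sizeof(pkt), 34, 16);
	printf("stored checksum: %02x%02x\n", pkt[50], pkt[51]);
	return 0;
}
#endif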
5412
5413static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
5414 unsigned int max)
5415{
5416 if (skb_headlen(skb) >= len)
5417 return 0;
5418
5419 /* If we need to pull up then pull up to the max, so we
5420 * won't need to do it again.
5421 */
5422 if (max > skb->len)
5423 max = skb->len;
5424
5425 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
5426 return -ENOMEM;
5427
5428 if (skb_headlen(skb) < len)
5429 return -EPROTO;
5430
5431 return 0;
5432}
5433
5434#define MAX_TCP_HDR_LEN (15 * 4)
5435
5436static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
5437 typeof(IPPROTO_IP) proto,
5438 unsigned int off)
5439{
5440 int err;
5441
5442 switch (proto) {
5443 case IPPROTO_TCP:
5444 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
5445 off + MAX_TCP_HDR_LEN);
5446 if (!err && !skb_partial_csum_set(skb, off,
5447 offsetof(struct tcphdr,
5448 check)))
5449 err = -EPROTO;
5450 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
5451
5452 case IPPROTO_UDP:
5453 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
5454 off + sizeof(struct udphdr));
5455 if (!err && !skb_partial_csum_set(skb, off,
5456 offsetof(struct udphdr,
5457 check)))
5458 err = -EPROTO;
5459 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
5460 }
5461
5462 return ERR_PTR(-EPROTO);
5463}
5464
5465/* This value should be large enough to cover a tagged ethernet header plus
5466 * maximally sized IP and TCP or UDP headers.
5467 */
5468#define MAX_IP_HDR_LEN 128
5469
5470static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
5471{
5472 unsigned int off;
5473 bool fragment;
5474 __sum16 *csum;
5475 int err;
5476
5477 fragment = false;
5478
5479 err = skb_maybe_pull_tail(skb,
5480 sizeof(struct iphdr),
5481 MAX_IP_HDR_LEN);
5482 if (err < 0)
5483 goto out;
5484
5485 if (ip_is_fragment(ip_hdr(skb)))
5486 fragment = true;
5487
5488 off = ip_hdrlen(skb);
5489
5490 err = -EPROTO;
5491
5492 if (fragment)
5493 goto out;
5494
5495 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
5496 if (IS_ERR(csum))
5497 return PTR_ERR(csum);
5498
5499 if (recalculate)
5500 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
5501 ip_hdr(skb)->daddr,
5502 skb->len - off,
5503 ip_hdr(skb)->protocol, 0);
5504 err = 0;
5505
5506out:
5507 return err;
5508}
5509
5510/* This value should be large enough to cover a tagged ethernet header plus
5511 * an IPv6 header, all options, and a maximal TCP or UDP header.
5512 */
5513#define MAX_IPV6_HDR_LEN 256
5514
5515#define OPT_HDR(type, skb, off) \
5516 (type *)(skb_network_header(skb) + (off))
5517
5518static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
5519{
5520 int err;
5521 u8 nexthdr;
5522 unsigned int off;
5523 unsigned int len;
5524 bool fragment;
5525 bool done;
5526 __sum16 *csum;
5527
5528 fragment = false;
5529 done = false;
5530
5531 off = sizeof(struct ipv6hdr);
5532
5533 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
5534 if (err < 0)
5535 goto out;
5536
5537 nexthdr = ipv6_hdr(skb)->nexthdr;
5538
5539 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
5540 while (off <= len && !done) {
5541 switch (nexthdr) {
5542 case IPPROTO_DSTOPTS:
5543 case IPPROTO_HOPOPTS:
5544 case IPPROTO_ROUTING: {
5545 struct ipv6_opt_hdr *hp;
5546
5547 err = skb_maybe_pull_tail(skb,
5548 off +
5549 sizeof(struct ipv6_opt_hdr),
5550 MAX_IPV6_HDR_LEN);
5551 if (err < 0)
5552 goto out;
5553
5554 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
5555 nexthdr = hp->nexthdr;
5556 off += ipv6_optlen(hp);
5557 break;
5558 }
5559 case IPPROTO_AH: {
5560 struct ip_auth_hdr *hp;
5561
5562 err = skb_maybe_pull_tail(skb,
5563 off +
5564 sizeof(struct ip_auth_hdr),
5565 MAX_IPV6_HDR_LEN);
5566 if (err < 0)
5567 goto out;
5568
5569 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
5570 nexthdr = hp->nexthdr;
5571 off += ipv6_authlen(hp);
5572 break;
5573 }
5574 case IPPROTO_FRAGMENT: {
5575 struct frag_hdr *hp;
5576
5577 err = skb_maybe_pull_tail(skb,
5578 off +
5579 sizeof(struct frag_hdr),
5580 MAX_IPV6_HDR_LEN);
5581 if (err < 0)
5582 goto out;
5583
5584 hp = OPT_HDR(struct frag_hdr, skb, off);
5585
5586 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
5587 fragment = true;
5588
5589 nexthdr = hp->nexthdr;
5590 off += sizeof(struct frag_hdr);
5591 break;
5592 }
5593 default:
5594 done = true;
5595 break;
5596 }
5597 }
5598
5599 err = -EPROTO;
5600
5601 if (!done || fragment)
5602 goto out;
5603
5604 csum = skb_checksum_setup_ip(skb, nexthdr, off);
5605 if (IS_ERR(csum))
5606 return PTR_ERR(csum);
5607
5608 if (recalculate)
5609 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5610 &ipv6_hdr(skb)->daddr,
5611 skb->len - off, nexthdr, 0);
5612 err = 0;
5613
5614out:
5615 return err;
5616}
5617
5618/**
5619 * skb_checksum_setup - set up partial checksum offset
5620 * @skb: the skb to set up
5621 * @recalculate: if true the pseudo-header checksum will be recalculated
5622 */
5623int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
5624{
5625 int err;
5626
5627 switch (skb->protocol) {
5628 case htons(ETH_P_IP):
5629 err = skb_checksum_setup_ipv4(skb, recalculate);
5630 break;
5631
5632 case htons(ETH_P_IPV6):
5633 err = skb_checksum_setup_ipv6(skb, recalculate);
5634 break;
5635
5636 default:
5637 err = -EPROTO;
5638 break;
5639 }
5640
5641 return err;
5642}
5643EXPORT_SYMBOL(skb_checksum_setup);
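
/* For IPv4 TCP/UDP, the recalculated value written into the checksum field
 * is the ones-complement sum of the pseudo-header: source address,
 * destination address, protocol and L4 length (in spirit,
 * ~csum_tcpudp_magic(..., 0)). A minimal standalone sketch of that
 * composition; addresses are packed in host order purely for illustration:
 */
#if 0 /* Standalone userspace sketch; not part of the kernel build. */
#include <stdint.h>
#include <stdio.h>

static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr,
			       uint8_t proto, uint16_t l4len)
{
	uint32_t sum = 0;

	sum += saddr >> 16;
	sum += saddr & 0xffff;
	sum += daddr >> 16;
	sum += daddr & 0xffff;
	sum += proto;			/* zero byte + protocol number */
	sum += l4len;

	while (sum >> 16)		/* fold carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	/* 192.0.2.1 -> 198.51.100.7, TCP (6), 40 bytes of header + payload. */
	printf("pseudo-header seed = %#06x\n",
	       pseudo_hdr_sum(0xc0000201, 0xc6336407, 6, 40));
	return 0;
}
#endif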
5644
5645/**
5646 * skb_checksum_maybe_trim - maybe trims the given skb
5647 * @skb: the skb to check
5648 * @transport_len: the data length beyond the network header
5649 *
5650 * Checks whether the given skb has data beyond the given transport length.
5651 * If so, returns a cloned skb trimmed to this transport length.
5652 * Otherwise returns the provided skb. Returns NULL in error cases
5653 * (e.g. transport_len exceeds skb length or out-of-memory).
5654 *
5655 * Caller needs to set the skb transport header and free any returned skb if it
5656 * differs from the provided skb.
5657 */
5658static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
5659 unsigned int transport_len)
5660{
5661 struct sk_buff *skb_chk;
5662 unsigned int len = skb_transport_offset(skb) + transport_len;
5663 int ret;
5664
5665 if (skb->len < len)
5666 return NULL;
5667 else if (skb->len == len)
5668 return skb;
5669
5670 skb_chk = skb_clone(skb, GFP_ATOMIC);
5671 if (!skb_chk)
5672 return NULL;
5673
5674 ret = pskb_trim_rcsum(skb_chk, len);
5675 if (ret) {
5676 kfree_skb(skb_chk);
5677 return NULL;
5678 }
5679
5680 return skb_chk;
5681}
5682
5683/**
5684 * skb_checksum_trimmed - validate checksum of an skb
5685 * @skb: the skb to check
5686 * @transport_len: the data length beyond the network header
5687 * @skb_chkf: checksum function to use
5688 *
5689 * Applies the given checksum function skb_chkf to the provided skb.
5690 * Returns a checked and maybe trimmed skb. Returns NULL on error.
5691 *
5692 * If the skb has data beyond the given transport length, then a
5693 * trimmed & cloned skb is checked and returned.
5694 *
5695 * Caller needs to set the skb transport header and free any returned skb if it
5696 * differs from the provided skb.
5697 */
5698struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
5699 unsigned int transport_len,
5700 __sum16(*skb_chkf)(struct sk_buff *skb))
5701{
5702 struct sk_buff *skb_chk;
5703 unsigned int offset = skb_transport_offset(skb);
5704 __sum16 ret;
5705
5706 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
5707 if (!skb_chk)
5708 goto err;
5709
5710 if (!pskb_may_pull(skb_chk, offset))
5711 goto err;
5712
5713 skb_pull_rcsum(skb_chk, offset);
5714 ret = skb_chkf(skb_chk);
5715 skb_push_rcsum(skb_chk, offset);
5716
5717 if (ret)
5718 goto err;
5719
5720 return skb_chk;
5721
5722err:
5723 if (skb_chk && skb_chk != skb)
5724 kfree_skb(skb_chk);
5725
5726 return NULL;
5727
5728}
5729EXPORT_SYMBOL(skb_checksum_trimmed);
5730
5731void __skb_warn_lro_forwarding(const struct sk_buff *skb)
5732{
5733 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5734 skb->dev->name);
5735}
5736EXPORT_SYMBOL(__skb_warn_lro_forwarding);
5737
5738void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5739{
5740 if (head_stolen) {
5741 skb_release_head_state(skb);
5742 kmem_cache_free(skbuff_cache, skb);
5743 } else {
5744 __kfree_skb(skb);
5745 }
5746}
5747EXPORT_SYMBOL(kfree_skb_partial);
5748
5749/**
5750 * skb_try_coalesce - try to merge skb to prior one
5751 * @to: prior buffer
5752 * @from: buffer to add
5753 * @fragstolen: pointer to boolean
5754 * @delta_truesize: how much more was allocated than was requested
5755 */
5756bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
5757 bool *fragstolen, int *delta_truesize)
5758{
5759 struct skb_shared_info *to_shinfo, *from_shinfo;
5760 int i, delta, len = from->len;
5761
5762 *fragstolen = false;
5763
5764 if (skb_cloned(to))
5765 return false;
5766
5767 /* In general, avoid mixing page_pool and non-page_pool allocated
5768 * pages within the same SKB. Additionally avoid dealing with clones
5769 * with page_pool pages, in case the SKB is using page_pool fragment
5770 * references (page_pool_alloc_frag()). Since we only take full page
5771 * references for cloned SKBs at the moment that would result in
5772 * inconsistent reference counts.
5773 * In theory we could take full references if @from is cloned and
5774 * !@to->pp_recycle but it's tricky (due to a potential race with
5775 * the clone disappearing) and rare, so not worth dealing with.
5776 */
5777 if (to->pp_recycle != from->pp_recycle ||
5778 (from->pp_recycle && skb_cloned(from)))
5779 return false;
5780
5781 if (len <= skb_tailroom(to)) {
5782 if (len)
5783 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
5784 *delta_truesize = 0;
5785 return true;
5786 }
5787
5788 to_shinfo = skb_shinfo(to);
5789 from_shinfo = skb_shinfo(from);
5790 if (to_shinfo->frag_list || from_shinfo->frag_list)
5791 return false;
5792 if (skb_zcopy(to) || skb_zcopy(from))
5793 return false;
5794
5795 if (skb_headlen(from) != 0) {
5796 struct page *page;
5797 unsigned int offset;
5798
5799 if (to_shinfo->nr_frags +
5800 from_shinfo->nr_frags >= MAX_SKB_FRAGS)
5801 return false;
5802
5803 if (skb_head_is_locked(from))
5804 return false;
5805
5806 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5807
5808 page = virt_to_head_page(from->head);
5809 offset = from->data - (unsigned char *)page_address(page);
5810
5811 skb_fill_page_desc(to, to_shinfo->nr_frags,
5812 page, offset, skb_headlen(from));
5813 *fragstolen = true;
5814 } else {
5815 if (to_shinfo->nr_frags +
5816 from_shinfo->nr_frags > MAX_SKB_FRAGS)
5817 return false;
5818
5819 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5820 }
5821
5822 WARN_ON_ONCE(delta < len);
5823
5824 memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5825 from_shinfo->frags,
5826 from_shinfo->nr_frags * sizeof(skb_frag_t));
5827 to_shinfo->nr_frags += from_shinfo->nr_frags;
5828
5829 if (!skb_cloned(from))
5830 from_shinfo->nr_frags = 0;
5831
5832 /* if the skb is not cloned this does nothing
5833 * since we set nr_frags to 0.
5834 */
5835 for (i = 0; i < from_shinfo->nr_frags; i++)
5836 __skb_frag_ref(&from_shinfo->frags[i]);
5837
5838 to->truesize += delta;
5839 to->len += len;
5840 to->data_len += len;
5841
5842 *delta_truesize = delta;
5843 return true;
5844}
5845EXPORT_SYMBOL(skb_try_coalesce);
5846
5847/**
5848 * skb_scrub_packet - scrub an skb
5849 *
5850 * @skb: buffer to clean
5851 * @xnet: packet is crossing netns
5852 *
5853 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
5854 * into/from a tunnel. Some information has to be cleared during these
5855 * operations.
5856 * skb_scrub_packet can also be used to clean a skb before injecting it in
5857 * another namespace (@xnet == true). We have to clear all information in the
5858 * skb that could impact namespace isolation.
5859 */
5860void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5861{
5862 skb->pkt_type = PACKET_HOST;
5863 skb->skb_iif = 0;
5864 skb->ignore_df = 0;
5865 skb_dst_drop(skb);
5866 skb_ext_reset(skb);
5867 nf_reset_ct(skb);
5868 nf_reset_trace(skb);
5869
5870#ifdef CONFIG_NET_SWITCHDEV
5871 skb->offload_fwd_mark = 0;
5872 skb->offload_l3_fwd_mark = 0;
5873#endif
5874
5875 if (!xnet)
5876 return;
5877
5878 ipvs_reset(skb);
5879 skb->mark = 0;
5880 skb_clear_tstamp(skb);
5881}
5882EXPORT_SYMBOL_GPL(skb_scrub_packet);
5883
5884static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5885{
5886 int mac_len, meta_len;
5887 void *meta;
5888
5889 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5890 kfree_skb(skb);
5891 return NULL;
5892 }
5893
5894 mac_len = skb->data - skb_mac_header(skb);
5895 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5896 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5897 mac_len - VLAN_HLEN - ETH_TLEN);
5898 }
5899
5900 meta_len = skb_metadata_len(skb);
5901 if (meta_len) {
5902 meta = skb_metadata_end(skb) - meta_len;
5903 memmove(meta + VLAN_HLEN, meta, meta_len);
5904 }
5905
5906 skb->mac_header += VLAN_HLEN;
5907 return skb;
5908}
5909
5910struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5911{
5912 struct vlan_hdr *vhdr;
5913 u16 vlan_tci;
5914
5915 if (unlikely(skb_vlan_tag_present(skb))) {
5916 /* vlan_tci is already set-up so leave this for another time */
5917 return skb;
5918 }
5919
5920 skb = skb_share_check(skb, GFP_ATOMIC);
5921 if (unlikely(!skb))
5922 goto err_free;
5923 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
5924 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
5925 goto err_free;
5926
5927 vhdr = (struct vlan_hdr *)skb->data;
5928 vlan_tci = ntohs(vhdr->h_vlan_TCI);
5929 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5930
5931 skb_pull_rcsum(skb, VLAN_HLEN);
5932 vlan_set_encap_proto(skb, vhdr);
5933
5934 skb = skb_reorder_vlan_header(skb);
5935 if (unlikely(!skb))
5936 goto err_free;
5937
5938 skb_reset_network_header(skb);
5939 if (!skb_transport_header_was_set(skb))
5940 skb_reset_transport_header(skb);
5941 skb_reset_mac_len(skb);
5942
5943 return skb;
5944
5945err_free:
5946 kfree_skb(skb);
5947 return NULL;
5948}
5949EXPORT_SYMBOL(skb_vlan_untag);
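
/*
 * Illustrative sketch (not part of the original file): the receive path
 * calls skb_vlan_untag() when a VLAN ethertype reached skb->protocol without
 * the tag having been moved into the hwaccel field. On failure the skb has
 * already been freed. The example_* name is hypothetical.
 */
static inline struct sk_buff *example_maybe_untag(struct sk_buff *skb)
{
	if (eth_type_vlan(skb->protocol))
		skb = skb_vlan_untag(skb);	/* NULL if it had to be dropped */
	return skb;
}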
5950
5951int skb_ensure_writable(struct sk_buff *skb, unsigned int write_len)
5952{
5953 if (!pskb_may_pull(skb, write_len))
5954 return -ENOMEM;
5955
5956 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5957 return 0;
5958
5959 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5960}
5961EXPORT_SYMBOL(skb_ensure_writable);
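
/*
 * Illustrative sketch (not part of the original file): packet-mangling
 * callers (openvswitch, netfilter, BPF helpers) follow this pattern - make
 * the bytes about to be rewritten private to this skb before touching them.
 * Assumes the IPv4 helpers from <net/ip.h>; the example_* name is
 * hypothetical.
 */
static inline int example_decrement_ipv4_ttl(struct sk_buff *skb)
{
	struct iphdr *iph;
	int err;

	/* Cover everything up to the end of the IPv4 header. */
	err = skb_ensure_writable(skb, skb_network_offset(skb) +
				       sizeof(struct iphdr));
	if (err)
		return err;

	iph = ip_hdr(skb);
	if (iph->ttl <= 1)
		return -EINVAL;

	ip_decrease_ttl(iph);	/* also fixes up iph->check incrementally */
	return 0;
}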
5962
5963/* remove VLAN header from packet and update csum accordingly.
5964 * expects an skb with no hwaccel vlan tag set and a vlan tag in the payload
5965 */
5966int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5967{
5968 int offset = skb->data - skb_mac_header(skb);
5969 int err;
5970
5971 if (WARN_ONCE(offset,
5972 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5973 offset)) {
5974 return -EINVAL;
5975 }
5976
5977 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5978 if (unlikely(err))
5979 return err;
5980
5981 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5982
5983 vlan_remove_tag(skb, vlan_tci);
5984
5985 skb->mac_header += VLAN_HLEN;
5986
5987 if (skb_network_offset(skb) < ETH_HLEN)
5988 skb_set_network_header(skb, ETH_HLEN);
5989
5990 skb_reset_mac_len(skb);
5991
5992 return err;
5993}
5994EXPORT_SYMBOL(__skb_vlan_pop);
5995
5996/* Pop a vlan tag either from hwaccel or from payload.
5997 * Expects skb->data at mac header.
5998 */
5999int skb_vlan_pop(struct sk_buff *skb)
6000{
6001 u16 vlan_tci;
6002 __be16 vlan_proto;
6003 int err;
6004
6005 if (likely(skb_vlan_tag_present(skb))) {
6006 __vlan_hwaccel_clear_tag(skb);
6007 } else {
6008 if (unlikely(!eth_type_vlan(skb->protocol)))
6009 return 0;
6010
6011 err = __skb_vlan_pop(skb, &vlan_tci);
6012 if (err)
6013 return err;
6014 }
6015 /* move next vlan tag to hw accel tag */
6016 if (likely(!eth_type_vlan(skb->protocol)))
6017 return 0;
6018
6019 vlan_proto = skb->protocol;
6020 err = __skb_vlan_pop(skb, &vlan_tci);
6021 if (unlikely(err))
6022 return err;
6023
6024 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
6025 return 0;
6026}
6027EXPORT_SYMBOL(skb_vlan_pop);
6028
6029/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
6030 * Expects skb->data at mac header.
6031 */
6032int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
6033{
6034 if (skb_vlan_tag_present(skb)) {
6035 int offset = skb->data - skb_mac_header(skb);
6036 int err;
6037
6038 if (WARN_ONCE(offset,
6039 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
6040 offset)) {
6041 return -EINVAL;
6042 }
6043
6044 err = __vlan_insert_tag(skb, skb->vlan_proto,
6045 skb_vlan_tag_get(skb));
6046 if (err)
6047 return err;
6048
6049 skb->protocol = skb->vlan_proto;
6050 skb->mac_len += VLAN_HLEN;
6051
6052 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
6053 }
6054 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
6055 return 0;
6056}
6057EXPORT_SYMBOL(skb_vlan_push);
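
/*
 * Illustrative sketch (not part of the original file): how a TC/OVS style
 * action could use the pop/push pair above. Both helpers expect skb->data at
 * the mac header. VLAN ID 10 and the example_* name are arbitrary.
 */
static inline int example_retag_vlan(struct sk_buff *skb)
{
	int err;

	/* Remove the outermost tag, hwaccel or in-payload... */
	err = skb_vlan_pop(skb);
	if (err)
		return err;

	/* ...and push a fresh 802.1Q tag with VLAN ID 10, priority 0. */
	return skb_vlan_push(skb, htons(ETH_P_8021Q), 10);
}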
6058
6059/**
6060 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
6061 *
6062 * @skb: Socket buffer to modify
6063 *
6064 * Drop the Ethernet header of @skb.
6065 *
6066 * Expects that skb->data points to the mac header and that no VLAN tags are
6067 * present.
6068 *
6069 * Returns 0 on success, -errno otherwise.
6070 */
6071int skb_eth_pop(struct sk_buff *skb)
6072{
6073 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
6074 skb_network_offset(skb) < ETH_HLEN)
6075 return -EPROTO;
6076
6077 skb_pull_rcsum(skb, ETH_HLEN);
6078 skb_reset_mac_header(skb);
6079 skb_reset_mac_len(skb);
6080
6081 return 0;
6082}
6083EXPORT_SYMBOL(skb_eth_pop);
6084
6085/**
6086 * skb_eth_push() - Add a new Ethernet header at the head of a packet
6087 *
6088 * @skb: Socket buffer to modify
6089 * @dst: Destination MAC address of the new header
6090 * @src: Source MAC address of the new header
6091 *
6092 * Prepend @skb with a new Ethernet header.
6093 *
6094 * Expects that skb->data points to the mac header, which must be empty.
6095 *
6096 * Returns 0 on success, -errno otherwise.
6097 */
6098int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
6099 const unsigned char *src)
6100{
6101 struct ethhdr *eth;
6102 int err;
6103
6104 if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
6105 return -EPROTO;
6106
6107 err = skb_cow_head(skb, sizeof(*eth));
6108 if (err < 0)
6109 return err;
6110
6111 skb_push(skb, sizeof(*eth));
6112 skb_reset_mac_header(skb);
6113 skb_reset_mac_len(skb);
6114
6115 eth = eth_hdr(skb);
6116 ether_addr_copy(eth->h_dest, dst);
6117 ether_addr_copy(eth->h_source, src);
6118 eth->h_proto = skb->protocol;
6119
6120 skb_postpush_rcsum(skb, eth, sizeof(*eth));
6121
6122 return 0;
6123}
6124EXPORT_SYMBOL(skb_eth_push);
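
/*
 * Illustrative sketch (not part of the original file): an Ethernet
 * decap/encap pair, roughly what openvswitch pop_eth/push_eth actions do.
 * The MAC address arguments and the example_* name are placeholders.
 */
static inline int example_swap_eth_header(struct sk_buff *skb,
					  const unsigned char *new_dst,
					  const unsigned char *new_src)
{
	int err;

	err = skb_eth_pop(skb);		/* skb is now a bare L3 packet */
	if (err)
		return err;

	return skb_eth_push(skb, new_dst, new_src);
}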
6125
6126/* Update the ethertype of hdr and the skb csum value if required. */
6127static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
6128 __be16 ethertype)
6129{
6130 if (skb->ip_summed == CHECKSUM_COMPLETE) {
6131 __be16 diff[] = { ~hdr->h_proto, ethertype };
6132
6133 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
6134 }
6135
6136 hdr->h_proto = ethertype;
6137}
6138
6139/**
6140 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
6141 * the packet
6142 *
6143 * @skb: buffer
6144 * @mpls_lse: MPLS label stack entry to push
6145 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
6146 * @mac_len: length of the MAC header
6147 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
6148 * ethernet
6149 *
6150 * Expects skb->data at mac header.
6151 *
6152 * Returns 0 on success, -errno otherwise.
6153 */
6154int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
6155 int mac_len, bool ethernet)
6156{
6157 struct mpls_shim_hdr *lse;
6158 int err;
6159
6160 if (unlikely(!eth_p_mpls(mpls_proto)))
6161 return -EINVAL;
6162
6163 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
6164 if (skb->encapsulation)
6165 return -EINVAL;
6166
6167 err = skb_cow_head(skb, MPLS_HLEN);
6168 if (unlikely(err))
6169 return err;
6170
6171 if (!skb->inner_protocol) {
6172 skb_set_inner_network_header(skb, skb_network_offset(skb));
6173 skb_set_inner_protocol(skb, skb->protocol);
6174 }
6175
6176 skb_push(skb, MPLS_HLEN);
6177 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
6178 mac_len);
6179 skb_reset_mac_header(skb);
6180 skb_set_network_header(skb, mac_len);
6181 skb_reset_mac_len(skb);
6182
6183 lse = mpls_hdr(skb);
6184 lse->label_stack_entry = mpls_lse;
6185 skb_postpush_rcsum(skb, lse, MPLS_HLEN);
6186
6187 if (ethernet && mac_len >= ETH_HLEN)
6188 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
6189 skb->protocol = mpls_proto;
6190
6191 return 0;
6192}
6193EXPORT_SYMBOL_GPL(skb_mpls_push);
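
/*
 * Illustrative sketch (not part of the original file): pushing a unicast
 * MPLS label the way an openvswitch push_mpls action would. Label 100,
 * TTL 64 and the example_* name are arbitrary; the MPLS_LS_* field macros
 * come from <linux/mpls.h>, which this file already includes.
 */
static inline int example_push_mpls_label(struct sk_buff *skb)
{
	u32 lse = (100 << MPLS_LS_LABEL_SHIFT) |	/* label 100 */
		  (1 << MPLS_LS_S_SHIFT) |		/* bottom of stack */
		  (64 << MPLS_LS_TTL_SHIFT);		/* TTL 64 */

	return skb_mpls_push(skb, cpu_to_be32(lse), htons(ETH_P_MPLS_UC),
			     skb->mac_len, true);
}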
6194
6195/**
6196 * skb_mpls_pop() - pop the outermost MPLS header
6197 *
6198 * @skb: buffer
6199 * @next_proto: ethertype of header after popped MPLS header
6200 * @mac_len: length of the MAC header
6201 * @ethernet: flag to indicate if the packet is ethernet
6202 *
6203 * Expects skb->data at mac header.
6204 *
6205 * Returns 0 on success, -errno otherwise.
6206 */
6207int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
6208 bool ethernet)
6209{
6210 int err;
6211
6212 if (unlikely(!eth_p_mpls(skb->protocol)))
6213 return 0;
6214
6215 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
6216 if (unlikely(err))
6217 return err;
6218
6219 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
6220 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
6221 mac_len);
6222
6223 __skb_pull(skb, MPLS_HLEN);
6224 skb_reset_mac_header(skb);
6225 skb_set_network_header(skb, mac_len);
6226
6227 if (ethernet && mac_len >= ETH_HLEN) {
6228 struct ethhdr *hdr;
6229
6230 /* use mpls_hdr() to get ethertype to account for VLANs. */
6231 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
6232 skb_mod_eth_type(skb, hdr, next_proto);
6233 }
6234 skb->protocol = next_proto;
6235
6236 return 0;
6237}
6238EXPORT_SYMBOL_GPL(skb_mpls_pop);
6239
6240/**
6241 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
6242 *
6243 * @skb: buffer
6244 * @mpls_lse: new MPLS label stack entry to update to
6245 *
6246 * Expects skb->data at mac header.
6247 *
6248 * Returns 0 on success, -errno otherwise.
6249 */
6250int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
6251{
6252 int err;
6253
6254 if (unlikely(!eth_p_mpls(skb->protocol)))
6255 return -EINVAL;
6256
6257 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
6258 if (unlikely(err))
6259 return err;
6260
6261 if (skb->ip_summed == CHECKSUM_COMPLETE) {
6262 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
6263
6264 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
6265 }
6266
6267 mpls_hdr(skb)->label_stack_entry = mpls_lse;
6268
6269 return 0;
6270}
6271EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
6272
6273/**
6274 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
6275 *
6276 * @skb: buffer
6277 *
6278 * Expects skb->data at mac header.
6279 *
6280 * Returns 0 on success, -errno otherwise.
6281 */
6282int skb_mpls_dec_ttl(struct sk_buff *skb)
6283{
6284 u32 lse;
6285 u8 ttl;
6286
6287 if (unlikely(!eth_p_mpls(skb->protocol)))
6288 return -EINVAL;
6289
6290 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
6291 return -ENOMEM;
6292
6293 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
6294 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
6295 if (!--ttl)
6296 return -EINVAL;
6297
6298 lse &= ~MPLS_LS_TTL_MASK;
6299 lse |= ttl << MPLS_LS_TTL_SHIFT;
6300
6301 return skb_mpls_update_lse(skb, cpu_to_be32(lse));
6302}
6303EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
6304
6305/**
6306 * alloc_skb_with_frags - allocate skb with page frags
6307 *
6308 * @header_len: size of linear part
6309 * @data_len: needed length in frags
6310 * @order: max page order desired.
6311 * @errcode: pointer to error code if any
6312 * @gfp_mask: allocation mask
6313 *
6314 * This can be used to allocate a paged skb, given a maximal order for frags.
6315 */
6316struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
6317 unsigned long data_len,
6318 int order,
6319 int *errcode,
6320 gfp_t gfp_mask)
6321{
6322 unsigned long chunk;
6323 struct sk_buff *skb;
6324 struct page *page;
6325 int nr_frags = 0;
6326
6327 *errcode = -EMSGSIZE;
6328 if (unlikely(data_len > MAX_SKB_FRAGS * (PAGE_SIZE << order)))
6329 return NULL;
6330
6331 *errcode = -ENOBUFS;
6332 skb = alloc_skb(header_len, gfp_mask);
6333 if (!skb)
6334 return NULL;
6335
6336 while (data_len) {
6337 if (nr_frags == MAX_SKB_FRAGS - 1)
6338 goto failure;
6339 while (order && PAGE_ALIGN(data_len) < (PAGE_SIZE << order))
6340 order--;
6341
6342 if (order) {
6343 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
6344 __GFP_COMP |
6345 __GFP_NOWARN,
6346 order);
6347 if (!page) {
6348 order--;
6349 continue;
6350 }
6351 } else {
6352 page = alloc_page(gfp_mask);
6353 if (!page)
6354 goto failure;
6355 }
6356 chunk = min_t(unsigned long, data_len,
6357 PAGE_SIZE << order);
6358 skb_fill_page_desc(skb, nr_frags, page, 0, chunk);
6359 nr_frags++;
6360 skb->truesize += (PAGE_SIZE << order);
6361 data_len -= chunk;
6362 }
6363 return skb;
6364
6365failure:
6366 kfree_skb(skb);
6367 return NULL;
6368}
6369EXPORT_SYMBOL(alloc_skb_with_frags);
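
/*
 * Illustrative sketch (not part of the original file): allocating a mostly
 * paged skb the way sock_alloc_send_pskb() does - a small linear area for
 * headers and the payload spread over high-order frags when the page
 * allocator cooperates. The sizes and the example_* name are arbitrary.
 */
static inline struct sk_buff *example_alloc_paged_skb(size_t payload,
						      int *errcode)
{
	/* 128 linear bytes, payload in frags of at most
	 * PAGE_ALLOC_COSTLY_ORDER pages, falling back to order-0 pages.
	 */
	return alloc_skb_with_frags(128, payload, PAGE_ALLOC_COSTLY_ORDER,
				    errcode, GFP_KERNEL);
}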
6370
6371/* carve out the first off bytes from skb when off < headlen */
6372static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
6373 const int headlen, gfp_t gfp_mask)
6374{
6375 int i;
6376 unsigned int size = skb_end_offset(skb);
6377 int new_hlen = headlen - off;
6378 u8 *data;
6379
6380 if (skb_pfmemalloc(skb))
6381 gfp_mask |= __GFP_MEMALLOC;
6382
6383 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
6384 if (!data)
6385 return -ENOMEM;
6386 size = SKB_WITH_OVERHEAD(size);
6387
6388 /* Copy real data, and all frags */
6389 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
6390 skb->len -= off;
6391
6392 memcpy((struct skb_shared_info *)(data + size),
6393 skb_shinfo(skb),
6394 offsetof(struct skb_shared_info,
6395 frags[skb_shinfo(skb)->nr_frags]));
6396 if (skb_cloned(skb)) {
6397 /* drop the old head gracefully */
6398 if (skb_orphan_frags(skb, gfp_mask)) {
6399 skb_kfree_head(data, size);
6400 return -ENOMEM;
6401 }
6402 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
6403 skb_frag_ref(skb, i);
6404 if (skb_has_frag_list(skb))
6405 skb_clone_fraglist(skb);
6406 skb_release_data(skb, SKB_CONSUMED, false);
6407 } else {
6408 /* we can reuse the existing refcount - all we did was
6409 * relocate values
6410 */
6411 skb_free_head(skb, false);
6412 }
6413
6414 skb->head = data;
6415 skb->data = data;
6416 skb->head_frag = 0;
6417 skb_set_end_offset(skb, size);
6418 skb_set_tail_pointer(skb, skb_headlen(skb));
6419 skb_headers_offset_update(skb, 0);
6420 skb->cloned = 0;
6421 skb->hdr_len = 0;
6422 skb->nohdr = 0;
6423 atomic_set(&skb_shinfo(skb)->dataref, 1);
6424
6425 return 0;
6426}
6427
6428static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6429
6430/* carve out the first eat bytes from skb's frag_list. May recurse into
6431 * pskb_carve()
6432 */
6433static int pskb_carve_frag_list(struct sk_buff *skb,
6434 struct skb_shared_info *shinfo, int eat,
6435 gfp_t gfp_mask)
6436{
6437 struct sk_buff *list = shinfo->frag_list;
6438 struct sk_buff *clone = NULL;
6439 struct sk_buff *insp = NULL;
6440
6441 do {
6442 if (!list) {
6443 pr_err("Not enough bytes to eat. Want %d\n", eat);
6444 return -EFAULT;
6445 }
6446 if (list->len <= eat) {
6447 /* Eaten as whole. */
6448 eat -= list->len;
6449 list = list->next;
6450 insp = list;
6451 } else {
6452 /* Eaten partially. */
6453 if (skb_shared(list)) {
6454 clone = skb_clone(list, gfp_mask);
6455 if (!clone)
6456 return -ENOMEM;
6457 insp = list->next;
6458 list = clone;
6459 } else {
6460 /* This may be pulled without problems. */
6461 insp = list;
6462 }
6463 if (pskb_carve(list, eat, gfp_mask) < 0) {
6464 kfree_skb(clone);
6465 return -ENOMEM;
6466 }
6467 break;
6468 }
6469 } while (eat);
6470
6471 /* Free pulled out fragments. */
6472 while ((list = shinfo->frag_list) != insp) {
6473 shinfo->frag_list = list->next;
6474 consume_skb(list);
6475 }
6476 /* And insert new clone at head. */
6477 if (clone) {
6478 clone->next = list;
6479 shinfo->frag_list = clone;
6480 }
6481 return 0;
6482}
6483
6484 /* carve off the first off bytes from skb. The split line (off) is in the
6485 * non-linear part of skb
6486 */
6487static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
6488 int pos, gfp_t gfp_mask)
6489{
6490 int i, k = 0;
6491 unsigned int size = skb_end_offset(skb);
6492 u8 *data;
6493 const int nfrags = skb_shinfo(skb)->nr_frags;
6494 struct skb_shared_info *shinfo;
6495
6496 if (skb_pfmemalloc(skb))
6497 gfp_mask |= __GFP_MEMALLOC;
6498
6499 data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
6500 if (!data)
6501 return -ENOMEM;
6502 size = SKB_WITH_OVERHEAD(size);
6503
6504 memcpy((struct skb_shared_info *)(data + size),
6505 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
6506 if (skb_orphan_frags(skb, gfp_mask)) {
6507 skb_kfree_head(data, size);
6508 return -ENOMEM;
6509 }
6510 shinfo = (struct skb_shared_info *)(data + size);
6511 for (i = 0; i < nfrags; i++) {
6512 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
6513
6514 if (pos + fsize > off) {
6515 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
6516
6517 if (pos < off) {
6518 /* Split frag.
6519 * We have two variants in this case:
6520 * 1. Move the whole frag to the second
6521 * part, if it is possible. F.e.
6522 * this approach is mandatory for TUX,
6523 * where splitting is expensive.
6524 * 2. Split the frag exactly at the offset; that is what we do here.
6525 */
6526 skb_frag_off_add(&shinfo->frags[0], off - pos);
6527 skb_frag_size_sub(&shinfo->frags[0], off - pos);
6528 }
6529 skb_frag_ref(skb, i);
6530 k++;
6531 }
6532 pos += fsize;
6533 }
6534 shinfo->nr_frags = k;
6535 if (skb_has_frag_list(skb))
6536 skb_clone_fraglist(skb);
6537
6538 /* split line is in frag list */
6539 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
6540 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
6541 if (skb_has_frag_list(skb))
6542 kfree_skb_list(skb_shinfo(skb)->frag_list);
6543 skb_kfree_head(data, size);
6544 return -ENOMEM;
6545 }
6546 skb_release_data(skb, SKB_CONSUMED, false);
6547
6548 skb->head = data;
6549 skb->head_frag = 0;
6550 skb->data = data;
6551 skb_set_end_offset(skb, size);
6552 skb_reset_tail_pointer(skb);
6553 skb_headers_offset_update(skb, 0);
6554 skb->cloned = 0;
6555 skb->hdr_len = 0;
6556 skb->nohdr = 0;
6557 skb->len -= off;
6558 skb->data_len = skb->len;
6559 atomic_set(&skb_shinfo(skb)->dataref, 1);
6560 return 0;
6561}
6562
6563/* remove len bytes from the beginning of the skb */
6564static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
6565{
6566 int headlen = skb_headlen(skb);
6567
6568 if (len < headlen)
6569 return pskb_carve_inside_header(skb, len, headlen, gfp);
6570 else
6571 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
6572}
6573
6574/* Extract to_copy bytes starting at off from skb, and return this in
6575 * a new skb
6576 */
6577struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
6578 int to_copy, gfp_t gfp)
6579{
6580 struct sk_buff *clone = skb_clone(skb, gfp);
6581
6582 if (!clone)
6583 return NULL;
6584
6585 if (pskb_carve(clone, off, gfp) < 0 ||
6586 pskb_trim(clone, to_copy)) {
6587 kfree_skb(clone);
6588 return NULL;
6589 }
6590 return clone;
6591}
6592EXPORT_SYMBOL(pskb_extract);
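
/*
 * Illustrative sketch (not part of the original file): carving the payload
 * that follows hdr_len bytes of headers into a fresh skb, similar to what
 * RDS does when splitting a large receive. The source skb is only cloned,
 * never modified. The example_* name is hypothetical.
 */
static inline struct sk_buff *example_extract_payload(struct sk_buff *skb,
						      int hdr_len,
						      int payload_len)
{
	if (hdr_len + payload_len > skb->len)
		return NULL;

	return pskb_extract(skb, hdr_len, payload_len, GFP_ATOMIC);
}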
6593
6594/**
6595 * skb_condense - try to get rid of fragments/frag_list if possible
6596 * @skb: buffer
6597 *
6598 * Can be used to save memory before skb is added to a busy queue.
6599 * If packet has bytes in frags and enough tail room in skb->head,
6600 * pull all of them, so that we can free the frags right now and adjust
6601 * truesize.
6602 * Notes:
6603 * We do not reallocate skb->head, thus this cannot fail.
6604 * Caller must re-evaluate skb->truesize if needed.
6605 */
6606void skb_condense(struct sk_buff *skb)
6607{
6608 if (skb->data_len) {
6609 if (skb->data_len > skb->end - skb->tail ||
6610 skb_cloned(skb))
6611 return;
6612
6613 /* Nice, we can free page frag(s) right now */
6614 __pskb_pull_tail(skb, skb->data_len);
6615 }
6616 /* At this point, skb->truesize might be overestimated,
6617 * because skb had a fragment, and fragments do not tell
6618 * their truesize.
6619 * When we pulled its content into skb->head, fragment
6620 * was freed, but __pskb_pull_tail() could not possibly
6621 * adjust skb->truesize, not knowing the frag truesize.
6622 */
6623 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6624}
6625EXPORT_SYMBOL(skb_condense);
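
/*
 * Illustrative sketch (not part of the original file): TCP condenses skbs
 * right before parking them on receive/backlog queues, so page frags are
 * freed early and memory accounting charges the (possibly smaller) truesize.
 * The example_* name is hypothetical; real callers hold the queue lock.
 */
static inline void example_queue_condensed(struct sk_buff_head *list,
					   struct sk_buff *skb)
{
	skb_condense(skb);
	/* Any accounting must read skb->truesize after the call above. */
	__skb_queue_tail(list, skb);
}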
6626
6627#ifdef CONFIG_SKB_EXTENSIONS
6628static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6629{
6630 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6631}
6632
6633/**
6634 * __skb_ext_alloc - allocate a new skb extensions storage
6635 *
6636 * @flags: See kmalloc().
6637 *
6638 * Returns the newly allocated pointer. The pointer can later be attached
6639 * to an skb via __skb_ext_set().
6640 * Note: caller must handle the skb_ext as opaque data.
6641 */
6642struct skb_ext *__skb_ext_alloc(gfp_t flags)
6643{
6644 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
6645
6646 if (new) {
6647 memset(new->offset, 0, sizeof(new->offset));
6648 refcount_set(&new->refcnt, 1);
6649 }
6650
6651 return new;
6652}
6653
6654static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
6655 unsigned int old_active)
6656{
6657 struct skb_ext *new;
6658
6659 if (refcount_read(&old->refcnt) == 1)
6660 return old;
6661
6662 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
6663 if (!new)
6664 return NULL;
6665
6666 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
6667 refcount_set(&new->refcnt, 1);
6668
6669#ifdef CONFIG_XFRM
6670 if (old_active & (1 << SKB_EXT_SEC_PATH)) {
6671 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
6672 unsigned int i;
6673
6674 for (i = 0; i < sp->len; i++)
6675 xfrm_state_hold(sp->xvec[i]);
6676 }
6677#endif
6678 __skb_ext_put(old);
6679 return new;
6680}
6681
6682/**
6683 * __skb_ext_set - attach the specified extension storage to this skb
6684 * @skb: buffer
6685 * @id: extension id
6686 * @ext: extension storage previously allocated via __skb_ext_alloc()
6687 *
6688 * Existing extensions, if any, are cleared.
6689 *
6690 * Returns the pointer to the extension.
6691 */
6692void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
6693 struct skb_ext *ext)
6694{
6695 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
6696
6697 skb_ext_put(skb);
6698 newlen = newoff + skb_ext_type_len[id];
6699 ext->chunks = newlen;
6700 ext->offset[id] = newoff;
6701 skb->extensions = ext;
6702 skb->active_extensions = 1 << id;
6703 return skb_ext_get_ptr(ext, id);
6704}
6705
6706/**
6707 * skb_ext_add - allocate space for given extension, COW if needed
6708 * @skb: buffer
6709 * @id: extension to allocate space for
6710 *
6711 * Allocates enough space for the given extension.
6712 * If the extension is already present, a pointer to that extension
6713 * is returned.
6714 *
6715 * If the skb was cloned, COW applies and the returned memory can be
6716 * modified without changing the extension space of cloned buffers.
6717 *
6718 * Returns pointer to the extension or NULL on allocation failure.
6719 */
6720void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6721{
6722 struct skb_ext *new, *old = NULL;
6723 unsigned int newlen, newoff;
6724
6725 if (skb->active_extensions) {
6726 old = skb->extensions;
6727
6728 new = skb_ext_maybe_cow(old, skb->active_extensions);
6729 if (!new)
6730 return NULL;
6731
6732 if (__skb_ext_exist(new, id))
6733 goto set_active;
6734
6735 newoff = new->chunks;
6736 } else {
6737 newoff = SKB_EXT_CHUNKSIZEOF(*new);
6738
6739 new = __skb_ext_alloc(GFP_ATOMIC);
6740 if (!new)
6741 return NULL;
6742 }
6743
6744 newlen = newoff + skb_ext_type_len[id];
6745 new->chunks = newlen;
6746 new->offset[id] = newoff;
6747set_active:
6748 skb->slow_gro = 1;
6749 skb->extensions = new;
6750 skb->active_extensions |= 1 << id;
6751 return skb_ext_get_ptr(new, id);
6752}
6753EXPORT_SYMBOL(skb_ext_add);
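
/*
 * Illustrative sketch (not part of the original file): attaching and
 * initialising an extension. SKB_EXT_MPTCP is used here only because struct
 * mptcp_ext is already visible via <net/mptcp.h>; other ids work the same
 * way. The example_* name is hypothetical.
 */
#if IS_ENABLED(CONFIG_MPTCP)
static inline int example_attach_mptcp_ext(struct sk_buff *skb)
{
	struct mptcp_ext *ext = skb_ext_add(skb, SKB_EXT_MPTCP);

	if (!ext)
		return -ENOMEM;		/* allocation or COW failed */

	memset(ext, 0, sizeof(*ext));	/* fresh extension space is not zeroed */
	return 0;
}
#endif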
6754
6755#ifdef CONFIG_XFRM
6756static void skb_ext_put_sp(struct sec_path *sp)
6757{
6758 unsigned int i;
6759
6760 for (i = 0; i < sp->len; i++)
6761 xfrm_state_put(sp->xvec[i]);
6762}
6763#endif
6764
6765#ifdef CONFIG_MCTP_FLOWS
6766static void skb_ext_put_mctp(struct mctp_flow *flow)
6767{
6768 if (flow->key)
6769 mctp_key_unref(flow->key);
6770}
6771#endif
6772
6773void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6774{
6775 struct skb_ext *ext = skb->extensions;
6776
6777 skb->active_extensions &= ~(1 << id);
6778 if (skb->active_extensions == 0) {
6779 skb->extensions = NULL;
6780 __skb_ext_put(ext);
6781#ifdef CONFIG_XFRM
6782 } else if (id == SKB_EXT_SEC_PATH &&
6783 refcount_read(&ext->refcnt) == 1) {
6784 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
6785
6786 skb_ext_put_sp(sp);
6787 sp->len = 0;
6788#endif
6789 }
6790}
6791EXPORT_SYMBOL(__skb_ext_del);
6792
6793void __skb_ext_put(struct skb_ext *ext)
6794{
6795 /* If this is last clone, nothing can increment
6796 * it after check passes. Avoids one atomic op.
6797 */
6798 if (refcount_read(&ext->refcnt) == 1)
6799 goto free_now;
6800
6801 if (!refcount_dec_and_test(&ext->refcnt))
6802 return;
6803free_now:
6804#ifdef CONFIG_XFRM
6805 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
6806 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
6807#endif
6808#ifdef CONFIG_MCTP_FLOWS
6809 if (__skb_ext_exist(ext, SKB_EXT_MCTP))
6810 skb_ext_put_mctp(skb_ext_get_ptr(ext, SKB_EXT_MCTP));
6811#endif
6812
6813 kmem_cache_free(skbuff_ext_cache, ext);
6814}
6815EXPORT_SYMBOL(__skb_ext_put);
6816#endif /* CONFIG_SKB_EXTENSIONS */
6817
6818/**
6819 * skb_attempt_defer_free - queue skb for remote freeing
6820 * @skb: buffer
6821 *
6822 * Put @skb in a per-cpu list, using the cpu which
6823 * allocated the skb/pages to reduce false sharing
6824 * and memory zone spinlock contention.
6825 */
6826void skb_attempt_defer_free(struct sk_buff *skb)
6827{
6828 int cpu = skb->alloc_cpu;
6829 struct softnet_data *sd;
6830 unsigned int defer_max;
6831 bool kick;
6832
6833 if (WARN_ON_ONCE(cpu >= nr_cpu_ids) ||
6834 !cpu_online(cpu) ||
6835 cpu == raw_smp_processor_id()) {
6836nodefer: __kfree_skb(skb);
6837 return;
6838 }
6839
6840 DEBUG_NET_WARN_ON_ONCE(skb_dst(skb));
6841 DEBUG_NET_WARN_ON_ONCE(skb->destructor);
6842
6843 sd = &per_cpu(softnet_data, cpu);
6844 defer_max = READ_ONCE(sysctl_skb_defer_max);
6845 if (READ_ONCE(sd->defer_count) >= defer_max)
6846 goto nodefer;
6847
6848 spin_lock_bh(&sd->defer_lock);
6849 /* Send an IPI every time queue reaches half capacity. */
6850 kick = sd->defer_count == (defer_max >> 1);
6851 /* Paired with the READ_ONCE() few lines above */
6852 WRITE_ONCE(sd->defer_count, sd->defer_count + 1);
6853
6854 skb->next = sd->defer_list;
6855 /* Paired with READ_ONCE() in skb_defer_free_flush() */
6856 WRITE_ONCE(sd->defer_list, skb);
6857 spin_unlock_bh(&sd->defer_lock);
6858
6859 /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
6860 * if we are unlucky enough (this seems very unlikely).
6861 */
6862 if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1))
6863 smp_call_function_single_async(cpu, &sd->defer_csd);
6864}
6865
6866static void skb_splice_csum_page(struct sk_buff *skb, struct page *page,
6867 size_t offset, size_t len)
6868{
6869 const char *kaddr;
6870 __wsum csum;
6871
6872 kaddr = kmap_local_page(page);
6873 csum = csum_partial(kaddr + offset, len, 0);
6874 kunmap_local(kaddr);
6875 skb->csum = csum_block_add(skb->csum, csum, skb->len);
6876}
6877
6878/**
6879 * skb_splice_from_iter - Splice (or copy) pages to skbuff
6880 * @skb: The buffer to add pages to
6881 * @iter: Iterator representing the pages to be added
6882 * @maxsize: Maximum amount of data to be added
6883 * @gfp: Allocation flags
6884 *
6885 * This is a common helper function for supporting MSG_SPLICE_PAGES. It
6886 * extracts pages from an iterator and adds them to the socket buffer if
6887 * possible, copying them to fragments if not possible (such as if they're slab
6888 * pages).
6889 *
6890 * Returns the amount of data spliced/copied or -EMSGSIZE if there's
6891 * insufficient space in the buffer to transfer anything.
6892 */
6893ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
6894 ssize_t maxsize, gfp_t gfp)
6895{
6896 size_t frag_limit = READ_ONCE(sysctl_max_skb_frags);
6897 struct page *pages[8], **ppages = pages;
6898 ssize_t spliced = 0, ret = 0;
6899 unsigned int i;
6900
6901 while (iter->count > 0) {
6902 ssize_t space, nr, len;
6903 size_t off;
6904
6905 ret = -EMSGSIZE;
6906 space = frag_limit - skb_shinfo(skb)->nr_frags;
6907 if (space < 0)
6908 break;
6909
6910 /* We might be able to coalesce without increasing nr_frags */
6911 nr = clamp_t(size_t, space, 1, ARRAY_SIZE(pages));
6912
6913 len = iov_iter_extract_pages(iter, &ppages, maxsize, nr, 0, &off);
6914 if (len <= 0) {
6915 ret = len ?: -EIO;
6916 break;
6917 }
6918
6919 i = 0;
6920 do {
6921 struct page *page = pages[i++];
6922 size_t part = min_t(size_t, PAGE_SIZE - off, len);
6923
6924 ret = -EIO;
6925 if (WARN_ON_ONCE(!sendpage_ok(page)))
6926 goto out;
6927
6928 ret = skb_append_pagefrags(skb, page, off, part,
6929 frag_limit);
6930 if (ret < 0) {
6931 iov_iter_revert(iter, len);
6932 goto out;
6933 }
6934
6935 if (skb->ip_summed == CHECKSUM_NONE)
6936 skb_splice_csum_page(skb, page, off, part);
6937
6938 off = 0;
6939 spliced += part;
6940 maxsize -= part;
6941 len -= part;
6942 } while (len > 0);
6943
6944 if (maxsize <= 0)
6945 break;
6946 }
6947
6948out:
6949 skb_len_add(skb, spliced);
6950 return spliced ?: ret;
6951}
6952EXPORT_SYMBOL(skb_splice_from_iter);
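
/*
 * Illustrative sketch (not part of the original file): how a sendmsg()
 * implementation consumes MSG_SPLICE_PAGES with the helper above, in the
 * spirit of the TCP caller. @copy is whatever the caller decided still fits
 * in this skb; the example_* name is hypothetical.
 */
static inline ssize_t example_splice_into_skb(struct sock *sk,
					      struct sk_buff *skb,
					      struct msghdr *msg, size_t copy)
{
	ssize_t ret;

	ret = skb_splice_from_iter(skb, &msg->msg_iter, copy,
				   sk->sk_allocation);
	if (ret < 0)
		return ret;

	/* skb->len, data_len and truesize were updated by the helper;
	 * socket write-memory accounting is still the caller's job.
	 */
	sk_wmem_queued_add(sk, ret);
	return ret;
}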
6953
6954static __always_inline
6955size_t memcpy_from_iter_csum(void *iter_from, size_t progress,
6956 size_t len, void *to, void *priv2)
6957{
6958 __wsum *csum = priv2;
6959 __wsum next = csum_partial_copy_nocheck(iter_from, to + progress, len);
6960
6961 *csum = csum_block_add(*csum, next, progress);
6962 return 0;
6963}
6964
6965static __always_inline
6966size_t copy_from_user_iter_csum(void __user *iter_from, size_t progress,
6967 size_t len, void *to, void *priv2)
6968{
6969 __wsum next, *csum = priv2;
6970
6971 next = csum_and_copy_from_user(iter_from, to + progress, len);
6972 *csum = csum_block_add(*csum, next, progress);
6973 return next ? 0 : len;
6974}
6975
6976bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
6977 __wsum *csum, struct iov_iter *i)
6978{
6979 size_t copied;
6980
6981 if (WARN_ON_ONCE(!i->data_source))
6982 return false;
6983 copied = iterate_and_advance2(i, bytes, addr, csum,
6984 copy_from_user_iter_csum,
6985 memcpy_from_iter_csum);
6986 if (likely(copied == bytes))
6987 return true;
6988 iov_iter_revert(i, copied);
6989 return false;
6990}
6991EXPORT_SYMBOL(csum_and_copy_from_iter_full);