// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	leaks.
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);

/* Caller must provide SKB that is memset cleared */
static struct sk_buff *__build_skb_around(struct sk_buff *skb,
					  void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	return skb;
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __build_skb_around(skb, data, frag_size);
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head and skb->pfmemalloc
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	skb = __build_skb_around(skb, data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
	} else {
		local_bh_disable();
		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_zcopy_clear(skb, true);
	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps up to can_dump_full whole packets if full_pkt, headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	static atomic_t can_dump_full = ATOMIC_INIT(5);
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=0x%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, frag->page_offset,
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero
 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 *	is being dropped after a failure and notes that
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 *	consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Like consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have been already dropped
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if containing objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	if (unlikely(!skb))
		return;

	/* Zero budget indicate non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	do {
		old_pg = atomic_long_read(&user->locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
		 old_pg);

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->callback = sock_zerocopy_callback;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	refcount_set(&uarg->refcnt, 1);
	sock_hold(sk);

	return uarg;
}
EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);

static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg)
{
	if (uarg) {
		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
		u32 bytelen, next;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		bytelen = uarg->bytelen + size;
		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg->id + uarg->len) == next) {
			if (mm_account_pinned_pages(&uarg->mmp, size))
				return NULL;
			uarg->len++;
			uarg->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);

			/* no extra ref when appending to datagram (MSG_MORE) */
			if (sk->sk_type == SOCK_STREAM)
				sock_zerocopy_get(uarg);

			return uarg;
		}
	}

new_alloc:
	return sock_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!success)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk->sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_callback);

void sock_zerocopy_put(struct ubuf_info *uarg)
{
	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
		if (uarg->callback)
			uarg->callback(uarg, uarg->zerocopy);
		else
			consume_skb(skb_from_uarg(uarg));
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put);

void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
{
	if (uarg) {
		struct sock *sk = skb_from_uarg(uarg)->sk;

		atomic_dec(&sk->sk_zckey);
		uarg->len--;

		if (have_uref)
			sock_zerocopy_put(uarg);
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);

int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
{
	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	struct iov_iter orig_iter = msg->msg_iter;
	int err, orig_len = skb->len;

	/* An skb can only point to one uarg. This edge case happens when
	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
	 */
	if (orig_uarg && uarg != orig_uarg)
		return -EEXIST;

	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		msg->msg_iter = orig_iter;
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg, NULL);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);

static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
	}
	return 0;
}

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!num_frags)
		goto release;

	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < new_frags; i++) {
		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	page = head;
	d_off = 0;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
				      p, p_off, p_len, copied) {
			u32 copy, done = 0;

			vaddr = kmap_atomic(p);

			while (done < p_len) {
				if (d_off == PAGE_SIZE) {
					d_off = 0;
					page = (struct page *)page_private(page);
				}
				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
				memcpy(page_address(page) + d_off,
				       vaddr + p_off + done, copy);
				done += copy;
				d_off += copy;
			}
			kunmap_atomic(vaddr);
		}
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	/* skb frags point to kernel buffers */
	for (i = 0; i < new_frags - 1; i++) {
		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
		head = (struct page *)page_private(head);
	}
	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
	skb_shinfo(skb)->nr_frags = new_frags;

release:
	skb_zcopy_clear(skb, false);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt gfp_mask() must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);

void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}
EXPORT_SYMBOL(skb_headers_offset_update);

void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}
EXPORT_SYMBOL(skb_copy_header);

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As by-product this function converts non-linear &sk_buff to linear
 *	one, so that &sk_buff becomes completely private and caller is allowed
 *	to modify all the data of returned buffer. This means that this
 *	function is not recommended for use in circumstances when only
 *	header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	skb_copy_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask) ||
		    skb_zerocopy_clone(n, skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	skb_copy_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success or error,
 *	if expansion failed. In the last case, &sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	BUG_ON(skb_shared(skb));

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		if (skb_zcopy(skb))
			refcount_inc(&skb_uarg(skb)->refcnt);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	skb->tail	      += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	skb_metadata_clear(skb);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			     skb->len + head_copy_len));

	skb_copy_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	__skb_pad		-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *	@free_on_error: free buffer on error
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return error in out of memory cases. The skb is freed on error
 *	if @free_on_error is true.
 */

int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
	int err;
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data+skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	if (free_on_error)
		kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(__skb_pad);

/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */

void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
void *skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
void *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	if (!skb->sk || skb->destructor == sock_edemux)
		skb_condense(skb);
	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/* Note : use pskb_trim_rcsum() instead of calling this directly
 */
int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		int delta = skb->len - len;

		skb->csum = csum_block_sub(skb->csum,
					   skb_checksum(skb, len, delta, 0),
					   len);
	}
	return __pskb_trim(skb, len);
}
EXPORT_SYMBOL(pskb_trim_rcsum_slow);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes a sense only on a fragmented &sk_buff,
 *	it expands header moving its tail forward and copying necessary
 *	data from fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if pull failed
 *	or value of new tail of skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb has not enough free space at tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
			     skb_tail_pointer(skb), delta));

	/* Optimization: no fragments, no reasons to preestimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need update frag list, we are in troubles.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be very rare operation, it is worth to fight against
	 * further bloating skb head and crucify ourselves here instead.
	 * Pure masohism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				if (!i)
					goto end;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

end:
	skb->tail     += delta;
	skb->data_len -= delta;

	if (!skb->data_len)
		skb_zcopy_clear(skb, false);

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(f,
					      f->page_offset + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				memcpy(to + copied, vaddr + p_off, p_len);
				kunmap_atomic(vaddr);
			}

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	put_page(spd->pages[i]);
}

static struct page *linear_to_page(struct page *page, unsigned int *len,
				   unsigned int *offset,
				   struct sock *sk)
{
	struct page_frag *pfrag = sk_page_frag(sk);

	if (!sk_page_frag_refill(sk, pfrag))
		return NULL;

	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);

	memcpy(page_address(pfrag->page) + pfrag->offset,
	       page_address(page) + *offset, *len);
	*offset = pfrag->offset;
	pfrag->offset += *len;

	return pfrag->page;
}

static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
			     struct page *page,
			     unsigned int offset)
{
	return	spd->nr_pages &&
		spd->pages[spd->nr_pages - 1] == page &&
		(spd->partial[spd->nr_pages - 1].offset +
		 spd->partial[spd->nr_pages - 1].len == offset);
}
2305 static bool spd_fill_page(struct splice_pipe_desc
*spd
,
2306 struct pipe_inode_info
*pipe
, struct page
*page
,
2307 unsigned int *len
, unsigned int offset
,
2311 if (unlikely(spd
->nr_pages
== MAX_SKB_FRAGS
))
2315 page
= linear_to_page(page
, len
, &offset
, sk
);
2319 if (spd_can_coalesce(spd
, page
, offset
)) {
2320 spd
->partial
[spd
->nr_pages
- 1].len
+= *len
;
2324 spd
->pages
[spd
->nr_pages
] = page
;
2325 spd
->partial
[spd
->nr_pages
].len
= *len
;
2326 spd
->partial
[spd
->nr_pages
].offset
= offset
;

static bool __splice_segment(struct page *page, unsigned int poff,
			     unsigned int plen, unsigned int *off,
			     unsigned int *len,
			     struct splice_pipe_desc *spd, bool linear,
			     struct sock *sk,
			     struct pipe_inode_info *pipe)
{
	if (!*len)
		return true;

	/* skip this segment if already processed */
	if (*off >= plen) {
		*off -= plen;
		return false;
	}

	/* ignore any bits we already processed */
	poff += *off;
	plen -= *off;
	*off = 0;

	do {
		unsigned int flen = min(*len, plen);

		if (spd_fill_page(spd, pipe, page, &flen, poff,
				  linear, sk))
			return true;
		poff += flen;
		plen -= flen;
		*len -= flen;
	} while (*len && plen);

	return false;
}

/*
 * Map linear and fragment data from the skb to spd. It reports true if the
 * pipe is full or if we already spliced the requested length.
 */
static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
			      unsigned int *offset, unsigned int *len,
			      struct splice_pipe_desc *spd, struct sock *sk)
{
	int seg;
	struct sk_buff *iter;

	/* map the linear part :
	 * If skb->head_frag is set, this 'linear' part is backed by a
	 * fragment, and if the head is not shared with any clones then
	 * we can avoid a copy since we own the head portion of this page.
	 */
	if (__splice_segment(virt_to_page(skb->data),
			     (unsigned long) skb->data & (PAGE_SIZE - 1),
			     skb_headlen(skb),
			     offset, len, spd,
			     skb_head_is_locked(skb),
			     sk, pipe))
		return true;

	/*
	 * then map the fragments
	 */
	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];

		if (__splice_segment(skb_frag_page(f),
				     f->page_offset, skb_frag_size(f),
				     offset, len, spd, false, sk, pipe))
			return true;
	}

	skb_walk_frags(skb, iter) {
		if (*offset >= iter->len) {
			*offset -= iter->len;
			continue;
		}
		/* __skb_splice_bits() only fails if the output has no room
		 * left, so no point in going over the frag_list for the error
		 * case.
		 */
		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
			return true;
	}

	return false;
}
/*
 * Map data from the skb to a pipe. Should handle both the linear part,
 * the fragments, and the frag list.
 */
int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
		    struct pipe_inode_info *pipe, unsigned int tlen,
		    unsigned int flags)
{
	struct partial_page partial[MAX_SKB_FRAGS];
	struct page *pages[MAX_SKB_FRAGS];
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.nr_pages_max = MAX_SKB_FRAGS,
		.ops = &nosteal_pipe_buf_ops,
		.spd_release = sock_spd_release,
	};
	int ret = 0;

	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);

	if (spd.nr_pages)
		ret = splice_to_pipe(pipe, &spd);

	return ret;
}
EXPORT_SYMBOL_GPL(skb_splice_bits);
2447 /* Send skb data on a socket. Socket must be locked. */
2448 int skb_send_sock_locked(struct sock
*sk
, struct sk_buff
*skb
, int offset
,
2451 unsigned int orig_len
= len
;
2452 struct sk_buff
*head
= skb
;
2453 unsigned short fragidx
;
2458 /* Deal with head data */
2459 while (offset
< skb_headlen(skb
) && len
) {
2463 slen
= min_t(int, len
, skb_headlen(skb
) - offset
);
2464 kv
.iov_base
= skb
->data
+ offset
;
2466 memset(&msg
, 0, sizeof(msg
));
2467 msg
.msg_flags
= MSG_DONTWAIT
;
2469 ret
= kernel_sendmsg_locked(sk
, &msg
, &kv
, 1, slen
);
2477 /* All the data was skb head? */
2481 /* Make offset relative to start of frags */
2482 offset
-= skb_headlen(skb
);
2484 /* Find where we are in frag list */
2485 for (fragidx
= 0; fragidx
< skb_shinfo(skb
)->nr_frags
; fragidx
++) {
2486 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[fragidx
];
2488 if (offset
< frag
->size
)
2491 offset
-= frag
->size
;
2494 for (; len
&& fragidx
< skb_shinfo(skb
)->nr_frags
; fragidx
++) {
2495 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[fragidx
];
2497 slen
= min_t(size_t, len
, frag
->size
- offset
);
2500 ret
= kernel_sendpage_locked(sk
, frag
->page
.p
,
2501 frag
->page_offset
+ offset
,
2502 slen
, MSG_DONTWAIT
);
2515 /* Process any frag lists */
2518 if (skb_has_frag_list(skb
)) {
2519 skb
= skb_shinfo(skb
)->frag_list
;
2522 } else if (skb
->next
) {
2529 return orig_len
- len
;
2532 return orig_len
== len
? ret
: orig_len
- len
;
2534 EXPORT_SYMBOL_GPL(skb_send_sock_locked
);
/**
 *	skb_store_bits - store bits from kernel buffer to skb
 *	@skb: destination buffer
 *	@offset: offset in destination
 *	@from: source buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source buffer to the
 *	destination skb.  This function handles all the messy bits of
 *	traversing fragment lists and such.
 */
int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(frag,
					      frag->page_offset + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				memcpy(vaddr + p_off, from + copied, p_len);
				kunmap_atomic(vaddr);
			}

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);
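
/* Illustrative usage sketch (compiled out, not part of this file's API):
 * patching a few bytes at a known offset with skb_store_bits(), which
 * handles paged fragments and frag lists transparently. The offset
 * parameter and the marker value are invented for the example.
 */
#if 0
static int example_overwrite_marker(struct sk_buff *skb, int offset)
{
	const u8 marker[4] = { 0xde, 0xad, 0xbe, 0xef };

	/* Fails if the skb is shorter than offset + sizeof(marker). */
	return skb_store_bits(skb, offset, marker, sizeof(marker));
}
#endif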
/* Checksum skb data. */
__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
				       skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos	= copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			__wsum csum2;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(frag,
					      frag->page_offset + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				csum2 = INDIRECT_CALL_1(ops->update,
							csum_partial_ext,
							vaddr + p_off, p_len, 0);
				kunmap_atomic(vaddr);
				csum = INDIRECT_CALL_1(ops->combine,
						       csum_block_add_ext, csum,
						       csum2, pos, p_len);
				pos += p_len;
			}

			if (!(len -= copy))
				return csum;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;

			if (copy > len)
				copy = len;
			csum2 = __skb_checksum(frag_iter, offset - start,
					       copy, 0, ops);
			csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
					       csum, csum2, pos, copy);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos    += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(__skb_checksum);
__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	const struct skb_checksum_ops ops = {
		.update  = csum_partial_ext,
		.combine = csum_block_add_ext,
	};

	return __skb_checksum(skb, offset, len, csum, &ops);
}
EXPORT_SYMBOL(skb_checksum);
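
/* Illustrative usage sketch (compiled out, not part of this file's API):
 * folding a checksum over an skb region with skb_checksum(), the way a
 * transport might verify a packet whose payload starts at thoff. The
 * function name and the thoff parameter are assumptions for the example.
 */
#if 0
static __sum16 example_payload_csum(const struct sk_buff *skb, int thoff)
{
	__wsum csum;

	/* Sum everything from the transport header to the end of the skb. */
	csum = skb_checksum(skb, thoff, skb->len - thoff, 0);
	return csum_fold(csum);
}
#endif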
2722 /* Both of above in one bottle. */
2724 __wsum
skb_copy_and_csum_bits(const struct sk_buff
*skb
, int offset
,
2725 u8
*to
, int len
, __wsum csum
)
2727 int start
= skb_headlen(skb
);
2728 int i
, copy
= start
- offset
;
2729 struct sk_buff
*frag_iter
;
2736 csum
= csum_partial_copy_nocheck(skb
->data
+ offset
, to
,
2738 if ((len
-= copy
) == 0)
2745 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
2748 WARN_ON(start
> offset
+ len
);
2750 end
= start
+ skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
2751 if ((copy
= end
- offset
) > 0) {
2752 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
2753 u32 p_off
, p_len
, copied
;
2761 skb_frag_foreach_page(frag
,
2762 frag
->page_offset
+ offset
- start
,
2763 copy
, p
, p_off
, p_len
, copied
) {
2764 vaddr
= kmap_atomic(p
);
2765 csum2
= csum_partial_copy_nocheck(vaddr
+ p_off
,
2768 kunmap_atomic(vaddr
);
2769 csum
= csum_block_add(csum
, csum2
, pos
);
2781 skb_walk_frags(skb
, frag_iter
) {
2785 WARN_ON(start
> offset
+ len
);
2787 end
= start
+ frag_iter
->len
;
2788 if ((copy
= end
- offset
) > 0) {
2791 csum2
= skb_copy_and_csum_bits(frag_iter
,
2794 csum
= csum_block_add(csum
, csum2
, pos
);
2795 if ((len
-= copy
) == 0)
2806 EXPORT_SYMBOL(skb_copy_and_csum_bits
);
__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	if (!skb_shared(skb))
		skb->csum_valid = !sum;
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);
/* This function assumes skb->csum already holds pseudo header's checksum,
 * which has been changed from the hardware checksum, for example, by
 * __skb_checksum_validate_complete(). And, the original skb->csum must
 * have been validated unsuccessfully for CHECKSUM_COMPLETE case.
 *
 * It returns non-zero if the recomputed checksum is still invalid, otherwise
 * zero. The new checksum is stored back into skb->csum unless the skb is
 * shared.
 */
__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	__wsum csum;
	__sum16 sum;

	csum = skb_checksum(skb, 0, skb->len, 0);

	sum = csum_fold(csum_add(skb->csum, csum));
	/* This check is inverted, because we already knew the hardware
	 * checksum is invalid before calling this function. So, if the
	 * re-computed checksum is valid instead, then we have a mismatch
	 * between the original skb->csum and skb_checksum(). This means either
	 * the original hardware checksum is incorrect or we screw up skb->csum
	 * when moving skb->data around.
	 */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	if (!skb_shared(skb)) {
		/* Save full packet checksum */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum_complete_sw = 1;
		skb->csum_valid = !sum;
	}

	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete);
static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
{
	net_warn_ratelimited(
		"%s: attempt to compute crc32c without libcrc32c.ko\n",
		__func__);
	return sum;
}

static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
				       int offset, int len)
{
	net_warn_ratelimited(
		"%s: attempt to compute crc32c without libcrc32c.ko\n",
		__func__);
	return csum;
}

static const struct skb_checksum_ops default_crc32c_ops = {
	.update  = warn_crc32c_csum_update,
	.combine = warn_crc32c_csum_combine,
};

const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
	&default_crc32c_ops;
EXPORT_SYMBOL(crc32c_csum_stub);
2894 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2895 * @from: source buffer
2897 * Calculates the amount of linear headroom needed in the 'to' skb passed
2898 * into skb_zerocopy().
2901 skb_zerocopy_headlen(const struct sk_buff
*from
)
2903 unsigned int hlen
= 0;
2905 if (!from
->head_frag
||
2906 skb_headlen(from
) < L1_CACHE_BYTES
||
2907 skb_shinfo(from
)->nr_frags
>= MAX_SKB_FRAGS
)
2908 hlen
= skb_headlen(from
);
2910 if (skb_has_frag_list(from
))
2915 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen
);
2918 * skb_zerocopy - Zero copy skb to skb
2919 * @to: destination buffer
2920 * @from: source buffer
2921 * @len: number of bytes to copy from source buffer
2922 * @hlen: size of linear headroom in destination buffer
2924 * Copies up to `len` bytes from `from` to `to` by creating references
2925 * to the frags in the source buffer.
2927 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2928 * headroom in the `to` buffer.
2931 * 0: everything is OK
2932 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2933 * -EFAULT: skb_copy_bits() found some problem with skb geometry
2936 skb_zerocopy(struct sk_buff
*to
, struct sk_buff
*from
, int len
, int hlen
)
2939 int plen
= 0; /* length of skb->head fragment */
2942 unsigned int offset
;
2944 BUG_ON(!from
->head_frag
&& !hlen
);
2946 /* dont bother with small payloads */
2947 if (len
<= skb_tailroom(to
))
2948 return skb_copy_bits(from
, 0, skb_put(to
, len
), len
);
2951 ret
= skb_copy_bits(from
, 0, skb_put(to
, hlen
), hlen
);
2956 plen
= min_t(int, skb_headlen(from
), len
);
2958 page
= virt_to_head_page(from
->head
);
2959 offset
= from
->data
- (unsigned char *)page_address(page
);
2960 __skb_fill_page_desc(to
, 0, page
, offset
, plen
);
2967 to
->truesize
+= len
+ plen
;
2968 to
->len
+= len
+ plen
;
2969 to
->data_len
+= len
+ plen
;
2971 if (unlikely(skb_orphan_frags(from
, GFP_ATOMIC
))) {
2975 skb_zerocopy_clone(to
, from
, GFP_ATOMIC
);
2977 for (i
= 0; i
< skb_shinfo(from
)->nr_frags
; i
++) {
2980 skb_shinfo(to
)->frags
[j
] = skb_shinfo(from
)->frags
[i
];
2981 skb_shinfo(to
)->frags
[j
].size
= min_t(int, skb_shinfo(to
)->frags
[j
].size
, len
);
2982 len
-= skb_shinfo(to
)->frags
[j
].size
;
2983 skb_frag_ref(to
, j
);
2986 skb_shinfo(to
)->nr_frags
= j
;
2990 EXPORT_SYMBOL_GPL(skb_zerocopy
);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart, 0);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);
/**
 *	skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The head item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);
/**
 *	skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. The list lock is taken so the function
 *	may be used safely with other locking list functions. The tail item is
 *	returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);
/**
 *	skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function takes the list
 *	lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);
/**
 *	skb_rbtree_purge - empty a skb rbtree
 *	@root: root of the rbtree to empty
 *	Return value: the sum of truesizes of all purged skbs.
 *
 *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
 *	the list and one reference dropped. This function does not take
 *	any lock. Synchronization should be handled by the caller (e.g., TCP
 *	out-of-order queue is protected by the socket lock).
 */
unsigned int skb_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		sum += skb->truesize;
		kfree_skb(skb);
	}
	return sum;
}
/**
 *	skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);
/**
 *	skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the tail of the list. This function takes the
 *	list lock and can be used safely with other locking &sk_buff functions.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);
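
/* Illustrative usage sketch (compiled out, not part of this file's API):
 * the usual producer/consumer pattern with an &sk_buff_head. The locked
 * variants above make the queue safe to use from different contexts. All
 * example_* names are invented for the example.
 */
#if 0
static struct sk_buff_head example_rxq;

static void example_rxq_setup(void)
{
	skb_queue_head_init(&example_rxq);	/* initialises lock and list */
}

static void example_rx(struct sk_buff *skb)
{
	skb_queue_tail(&example_rxq, skb);	/* producer side */
}

static void example_drain(void)
{
	struct sk_buff *skb;

	/* consumer side: pull buffers off the head until the queue is empty */
	while ((skb = skb_dequeue(&example_rxq)) != NULL)
		kfree_skb(skb);
}
#endif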
/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);
/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);
3184 static inline void skb_split_inside_header(struct sk_buff
*skb
,
3185 struct sk_buff
* skb1
,
3186 const u32 len
, const int pos
)
3190 skb_copy_from_linear_data_offset(skb
, len
, skb_put(skb1
, pos
- len
),
3192 /* And move data appendix as is. */
3193 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++)
3194 skb_shinfo(skb1
)->frags
[i
] = skb_shinfo(skb
)->frags
[i
];
3196 skb_shinfo(skb1
)->nr_frags
= skb_shinfo(skb
)->nr_frags
;
3197 skb_shinfo(skb
)->nr_frags
= 0;
3198 skb1
->data_len
= skb
->data_len
;
3199 skb1
->len
+= skb1
->data_len
;
3202 skb_set_tail_pointer(skb
, len
);
3205 static inline void skb_split_no_header(struct sk_buff
*skb
,
3206 struct sk_buff
* skb1
,
3207 const u32 len
, int pos
)
3210 const int nfrags
= skb_shinfo(skb
)->nr_frags
;
3212 skb_shinfo(skb
)->nr_frags
= 0;
3213 skb1
->len
= skb1
->data_len
= skb
->len
- len
;
3215 skb
->data_len
= len
- pos
;
3217 for (i
= 0; i
< nfrags
; i
++) {
3218 int size
= skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
3220 if (pos
+ size
> len
) {
3221 skb_shinfo(skb1
)->frags
[k
] = skb_shinfo(skb
)->frags
[i
];
3225 * We have two variants in this case:
3226 * 1. Move all the frag to the second
3227 * part, if it is possible. F.e.
3228 * this approach is mandatory for TUX,
3229 * where splitting is expensive.
3230 * 2. Split is accurately. We make this.
3232 skb_frag_ref(skb
, i
);
3233 skb_shinfo(skb1
)->frags
[0].page_offset
+= len
- pos
;
3234 skb_frag_size_sub(&skb_shinfo(skb1
)->frags
[0], len
- pos
);
3235 skb_frag_size_set(&skb_shinfo(skb
)->frags
[i
], len
- pos
);
3236 skb_shinfo(skb
)->nr_frags
++;
3240 skb_shinfo(skb
)->nr_frags
++;
3243 skb_shinfo(skb1
)->nr_frags
= k
;
/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
				      SKBTX_SHARED_FRAG;
	skb_zerocopy_clone(skb1, skb, 0);
	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
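
/* Illustrative usage sketch (compiled out, not part of this file's API):
 * splitting a queued buffer at a byte boundary, roughly as TCP does when
 * it only needs to resend part of an skb. The helper name is invented;
 * allocating skb_headlen(skb) bytes of linear space is an assumption made
 * so the header portion always fits if the split point lands inside it.
 */
#if 0
static struct sk_buff *example_split_off_tail(struct sk_buff *skb, u32 len)
{
	struct sk_buff *tail;

	if (len >= skb->len)
		return NULL;

	tail = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!tail)
		return NULL;

	/* skb keeps the first len bytes; tail receives the remainder
	 * (linear data is copied, paged frags are moved or split).
	 */
	skb_split(skb, tail, len);
	return tail;
}
#endif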
/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
3276 * skb_shift - Shifts paged data partially from skb to another
3277 * @tgt: buffer into which tail data gets added
3278 * @skb: buffer from which the paged data comes from
3279 * @shiftlen: shift up to this many bytes
3281 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3282 * the length of the skb, from skb to tgt. Returns number bytes shifted.
3283 * It's up to caller to free skb if everything was shifted.
3285 * If @tgt runs out of frags, the whole operation is aborted.
3287 * Skb cannot include anything else but paged data while tgt is allowed
3288 * to have non-paged data as well.
3290 * TODO: full sized shift could be optimized but that would need
3291 * specialized skb free'er to handle frags without up-to-date nr_frags.
3293 int skb_shift(struct sk_buff
*tgt
, struct sk_buff
*skb
, int shiftlen
)
3295 int from
, to
, merge
, todo
;
3296 struct skb_frag_struct
*fragfrom
, *fragto
;
3298 BUG_ON(shiftlen
> skb
->len
);
3300 if (skb_headlen(skb
))
3302 if (skb_zcopy(tgt
) || skb_zcopy(skb
))
3307 to
= skb_shinfo(tgt
)->nr_frags
;
3308 fragfrom
= &skb_shinfo(skb
)->frags
[from
];
3310 /* Actual merge is delayed until the point when we know we can
3311 * commit all, so that we don't have to undo partial changes
3314 !skb_can_coalesce(tgt
, to
, skb_frag_page(fragfrom
),
3315 fragfrom
->page_offset
)) {
3320 todo
-= skb_frag_size(fragfrom
);
3322 if (skb_prepare_for_shift(skb
) ||
3323 skb_prepare_for_shift(tgt
))
3326 /* All previous frag pointers might be stale! */
3327 fragfrom
= &skb_shinfo(skb
)->frags
[from
];
3328 fragto
= &skb_shinfo(tgt
)->frags
[merge
];
3330 skb_frag_size_add(fragto
, shiftlen
);
3331 skb_frag_size_sub(fragfrom
, shiftlen
);
3332 fragfrom
->page_offset
+= shiftlen
;
3340 /* Skip full, not-fitting skb to avoid expensive operations */
3341 if ((shiftlen
== skb
->len
) &&
3342 (skb_shinfo(skb
)->nr_frags
- from
) > (MAX_SKB_FRAGS
- to
))
3345 if (skb_prepare_for_shift(skb
) || skb_prepare_for_shift(tgt
))
3348 while ((todo
> 0) && (from
< skb_shinfo(skb
)->nr_frags
)) {
3349 if (to
== MAX_SKB_FRAGS
)
3352 fragfrom
= &skb_shinfo(skb
)->frags
[from
];
3353 fragto
= &skb_shinfo(tgt
)->frags
[to
];
3355 if (todo
>= skb_frag_size(fragfrom
)) {
3356 *fragto
= *fragfrom
;
3357 todo
-= skb_frag_size(fragfrom
);
3362 __skb_frag_ref(fragfrom
);
3363 fragto
->page
= fragfrom
->page
;
3364 fragto
->page_offset
= fragfrom
->page_offset
;
3365 skb_frag_size_set(fragto
, todo
);
3367 fragfrom
->page_offset
+= todo
;
3368 skb_frag_size_sub(fragfrom
, todo
);
3376 /* Ready to "commit" this state change to tgt */
3377 skb_shinfo(tgt
)->nr_frags
= to
;
3380 fragfrom
= &skb_shinfo(skb
)->frags
[0];
3381 fragto
= &skb_shinfo(tgt
)->frags
[merge
];
3383 skb_frag_size_add(fragto
, skb_frag_size(fragfrom
));
3384 __skb_frag_unref(fragfrom
);
3387 /* Reposition in the original skb */
3389 while (from
< skb_shinfo(skb
)->nr_frags
)
3390 skb_shinfo(skb
)->frags
[to
++] = skb_shinfo(skb
)->frags
[from
++];
3391 skb_shinfo(skb
)->nr_frags
= to
;
3393 BUG_ON(todo
> 0 && !skb_shinfo(skb
)->nr_frags
);
3396 /* Most likely the tgt won't ever need its checksum anymore, skb on
3397 * the other hand might need it if it needs to be resent
3399 tgt
->ip_summed
= CHECKSUM_PARTIAL
;
3400 skb
->ip_summed
= CHECKSUM_PARTIAL
;
3402 /* Yak, is it really working this way? Some helper please? */
3403 skb
->len
-= shiftlen
;
3404 skb
->data_len
-= shiftlen
;
3405 skb
->truesize
-= shiftlen
;
3406 tgt
->len
+= shiftlen
;
3407 tgt
->data_len
+= shiftlen
;
3408 tgt
->truesize
+= shiftlen
;
/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}
EXPORT_SYMBOL(skb_prepare_seq_read);
3435 * skb_seq_read - Sequentially read skb data
3436 * @consumed: number of bytes consumed by the caller so far
3437 * @data: destination pointer for data to be returned
3438 * @st: state variable
3440 * Reads a block of skb data at @consumed relative to the
3441 * lower offset specified to skb_prepare_seq_read(). Assigns
3442 * the head of the data block to @data and returns the length
3443 * of the block or 0 if the end of the skb data or the upper
3444 * offset has been reached.
3446 * The caller is not required to consume all of the data
3447 * returned, i.e. @consumed is typically set to the number
3448 * of bytes already consumed and the next call to
3449 * skb_seq_read() will return the remaining part of the block.
3451 * Note 1: The size of each block of data returned can be arbitrary,
3452 * this limitation is the cost for zerocopy sequential
3453 * reads of potentially non linear data.
3455 * Note 2: Fragment lists within fragments are not implemented
3456 * at the moment, state->root_skb could be replaced with
3457 * a stack for this purpose.
3459 unsigned int skb_seq_read(unsigned int consumed
, const u8
**data
,
3460 struct skb_seq_state
*st
)
3462 unsigned int block_limit
, abs_offset
= consumed
+ st
->lower_offset
;
3465 if (unlikely(abs_offset
>= st
->upper_offset
)) {
3466 if (st
->frag_data
) {
3467 kunmap_atomic(st
->frag_data
);
3468 st
->frag_data
= NULL
;
3474 block_limit
= skb_headlen(st
->cur_skb
) + st
->stepped_offset
;
3476 if (abs_offset
< block_limit
&& !st
->frag_data
) {
3477 *data
= st
->cur_skb
->data
+ (abs_offset
- st
->stepped_offset
);
3478 return block_limit
- abs_offset
;
3481 if (st
->frag_idx
== 0 && !st
->frag_data
)
3482 st
->stepped_offset
+= skb_headlen(st
->cur_skb
);
3484 while (st
->frag_idx
< skb_shinfo(st
->cur_skb
)->nr_frags
) {
3485 frag
= &skb_shinfo(st
->cur_skb
)->frags
[st
->frag_idx
];
3486 block_limit
= skb_frag_size(frag
) + st
->stepped_offset
;
3488 if (abs_offset
< block_limit
) {
3490 st
->frag_data
= kmap_atomic(skb_frag_page(frag
));
3492 *data
= (u8
*) st
->frag_data
+ frag
->page_offset
+
3493 (abs_offset
- st
->stepped_offset
);
3495 return block_limit
- abs_offset
;
3498 if (st
->frag_data
) {
3499 kunmap_atomic(st
->frag_data
);
3500 st
->frag_data
= NULL
;
3504 st
->stepped_offset
+= skb_frag_size(frag
);
3507 if (st
->frag_data
) {
3508 kunmap_atomic(st
->frag_data
);
3509 st
->frag_data
= NULL
;
3512 if (st
->root_skb
== st
->cur_skb
&& skb_has_frag_list(st
->root_skb
)) {
3513 st
->cur_skb
= skb_shinfo(st
->root_skb
)->frag_list
;
3516 } else if (st
->cur_skb
->next
) {
3517 st
->cur_skb
= st
->cur_skb
->next
;
3524 EXPORT_SYMBOL(skb_seq_read
);
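
/* Illustrative usage sketch (compiled out, not part of this file's API):
 * walking all payload bytes of a possibly non-linear skb with the
 * sequential read API above, without ever linearising the buffer. The
 * helper name and the byte-counting purpose are invented for the example.
 */
#if 0
static unsigned int example_count_zero_bytes(struct sk_buff *skb)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, zeros = 0, len, i;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);

	/* Each call returns a contiguous block of data and its length. */
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++)
			if (data[i] == 0)
				zeros++;
		consumed += len;
	}

	/* skb_seq_read() returned 0, so skb_abort_seq_read() is not needed. */
	return zeros;
}
#endif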
/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if skb_seq_read() was not called until it
 * returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}
/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config)
{
	struct ts_state state;
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));

	ret = textsearch_find(config, &state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
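
/* Illustrative usage sketch (compiled out, not part of this file's API):
 * searching an skb for a byte pattern by pairing skb_find_text() with the
 * lib/textsearch API. The helper name, the choice of the "bm" algorithm
 * and the GFP flags are assumptions made for the example.
 */
#if 0
static bool example_skb_contains(struct sk_buff *skb, const char *pattern)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("bm", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);

	return pos != UINT_MAX;
}
#endif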
int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, offset)) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
	} else if (i < MAX_SKB_FRAGS) {
		get_page(page);
		skb_fill_page_desc(skb, i, page, offset, size);
	} else {
		return -EMSGSIZE;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(skb_append_pagefrags);
/**
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_pull on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used on
 *	receive path processing instead of skb_pull unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	unsigned char *data = skb->data;

	BUG_ON(len > skb->len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, data, len);
	return skb->data;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
{
	skb_frag_t head_frag;
	struct page *page;

	page = virt_to_head_page(frag_skb->head);
	head_frag.page.p = page;
	head_frag.page_offset = frag_skb->data -
		(unsigned char *)page_address(page);
	head_frag.size = skb_headlen(frag_skb);
	return head_frag;
}
3636 * skb_segment - Perform protocol segmentation on skb.
3637 * @head_skb: buffer to segment
3638 * @features: features for the output path (see dev->features)
3640 * This function performs segmentation on the given skb. It returns
3641 * a pointer to the first in a list of new skbs for the segments.
3642 * In case of error it returns ERR_PTR(err).
3644 struct sk_buff
*skb_segment(struct sk_buff
*head_skb
,
3645 netdev_features_t features
)
3647 struct sk_buff
*segs
= NULL
;
3648 struct sk_buff
*tail
= NULL
;
3649 struct sk_buff
*list_skb
= skb_shinfo(head_skb
)->frag_list
;
3650 skb_frag_t
*frag
= skb_shinfo(head_skb
)->frags
;
3651 unsigned int mss
= skb_shinfo(head_skb
)->gso_size
;
3652 unsigned int doffset
= head_skb
->data
- skb_mac_header(head_skb
);
3653 struct sk_buff
*frag_skb
= head_skb
;
3654 unsigned int offset
= doffset
;
3655 unsigned int tnl_hlen
= skb_tnl_header_len(head_skb
);
3656 unsigned int partial_segs
= 0;
3657 unsigned int headroom
;
3658 unsigned int len
= head_skb
->len
;
3661 int nfrags
= skb_shinfo(head_skb
)->nr_frags
;
3667 __skb_push(head_skb
, doffset
);
3668 proto
= skb_network_protocol(head_skb
, &dummy
);
3669 if (unlikely(!proto
))
3670 return ERR_PTR(-EINVAL
);
3672 sg
= !!(features
& NETIF_F_SG
);
3673 csum
= !!can_checksum_protocol(features
, proto
);
3675 if (sg
&& csum
&& (mss
!= GSO_BY_FRAGS
)) {
3676 if (!(features
& NETIF_F_GSO_PARTIAL
)) {
3677 struct sk_buff
*iter
;
3678 unsigned int frag_len
;
3681 !net_gso_ok(features
, skb_shinfo(head_skb
)->gso_type
))
3684 /* If we get here then all the required
3685 * GSO features except frag_list are supported.
3686 * Try to split the SKB to multiple GSO SKBs
3687 * with no frag_list.
3688 * Currently we can do that only when the buffers don't
3689 * have a linear part and all the buffers except
3690 * the last are of the same length.
3692 frag_len
= list_skb
->len
;
3693 skb_walk_frags(head_skb
, iter
) {
3694 if (frag_len
!= iter
->len
&& iter
->next
)
3696 if (skb_headlen(iter
) && !iter
->head_frag
)
3702 if (len
!= frag_len
)
3706 /* GSO partial only requires that we trim off any excess that
3707 * doesn't fit into an MSS sized block, so take care of that
3710 partial_segs
= len
/ mss
;
3711 if (partial_segs
> 1)
3712 mss
*= partial_segs
;
3718 headroom
= skb_headroom(head_skb
);
3719 pos
= skb_headlen(head_skb
);
3722 struct sk_buff
*nskb
;
3723 skb_frag_t
*nskb_frag
;
3727 if (unlikely(mss
== GSO_BY_FRAGS
)) {
3728 len
= list_skb
->len
;
3730 len
= head_skb
->len
- offset
;
3735 hsize
= skb_headlen(head_skb
) - offset
;
3738 if (hsize
> len
|| !sg
)
3741 if (!hsize
&& i
>= nfrags
&& skb_headlen(list_skb
) &&
3742 (skb_headlen(list_skb
) == len
|| sg
)) {
3743 BUG_ON(skb_headlen(list_skb
) > len
);
3746 nfrags
= skb_shinfo(list_skb
)->nr_frags
;
3747 frag
= skb_shinfo(list_skb
)->frags
;
3748 frag_skb
= list_skb
;
3749 pos
+= skb_headlen(list_skb
);
3751 while (pos
< offset
+ len
) {
3752 BUG_ON(i
>= nfrags
);
3754 size
= skb_frag_size(frag
);
3755 if (pos
+ size
> offset
+ len
)
3763 nskb
= skb_clone(list_skb
, GFP_ATOMIC
);
3764 list_skb
= list_skb
->next
;
3766 if (unlikely(!nskb
))
3769 if (unlikely(pskb_trim(nskb
, len
))) {
3774 hsize
= skb_end_offset(nskb
);
3775 if (skb_cow_head(nskb
, doffset
+ headroom
)) {
3780 nskb
->truesize
+= skb_end_offset(nskb
) - hsize
;
3781 skb_release_head_state(nskb
);
3782 __skb_push(nskb
, doffset
);
3784 nskb
= __alloc_skb(hsize
+ doffset
+ headroom
,
3785 GFP_ATOMIC
, skb_alloc_rx_flag(head_skb
),
3788 if (unlikely(!nskb
))
3791 skb_reserve(nskb
, headroom
);
3792 __skb_put(nskb
, doffset
);
3801 __copy_skb_header(nskb
, head_skb
);
3803 skb_headers_offset_update(nskb
, skb_headroom(nskb
) - headroom
);
3804 skb_reset_mac_len(nskb
);
3806 skb_copy_from_linear_data_offset(head_skb
, -tnl_hlen
,
3807 nskb
->data
- tnl_hlen
,
3808 doffset
+ tnl_hlen
);
3810 if (nskb
->len
== len
+ doffset
)
3811 goto perform_csum_check
;
3814 if (!nskb
->remcsum_offload
)
3815 nskb
->ip_summed
= CHECKSUM_NONE
;
3816 SKB_GSO_CB(nskb
)->csum
=
3817 skb_copy_and_csum_bits(head_skb
, offset
,
3820 SKB_GSO_CB(nskb
)->csum_start
=
3821 skb_headroom(nskb
) + doffset
;
3825 nskb_frag
= skb_shinfo(nskb
)->frags
;
3827 skb_copy_from_linear_data_offset(head_skb
, offset
,
3828 skb_put(nskb
, hsize
), hsize
);
3830 skb_shinfo(nskb
)->tx_flags
|= skb_shinfo(head_skb
)->tx_flags
&
3833 if (skb_orphan_frags(frag_skb
, GFP_ATOMIC
) ||
3834 skb_zerocopy_clone(nskb
, frag_skb
, GFP_ATOMIC
))
3837 while (pos
< offset
+ len
) {
3840 nfrags
= skb_shinfo(list_skb
)->nr_frags
;
3841 frag
= skb_shinfo(list_skb
)->frags
;
3842 frag_skb
= list_skb
;
3843 if (!skb_headlen(list_skb
)) {
3846 BUG_ON(!list_skb
->head_frag
);
3848 /* to make room for head_frag. */
3852 if (skb_orphan_frags(frag_skb
, GFP_ATOMIC
) ||
3853 skb_zerocopy_clone(nskb
, frag_skb
,
3857 list_skb
= list_skb
->next
;
3860 if (unlikely(skb_shinfo(nskb
)->nr_frags
>=
3862 net_warn_ratelimited(
3863 "skb_segment: too many frags: %u %u\n",
3869 *nskb_frag
= (i
< 0) ? skb_head_frag_to_page_desc(frag_skb
) : *frag
;
3870 __skb_frag_ref(nskb_frag
);
3871 size
= skb_frag_size(nskb_frag
);
3874 nskb_frag
->page_offset
+= offset
- pos
;
3875 skb_frag_size_sub(nskb_frag
, offset
- pos
);
3878 skb_shinfo(nskb
)->nr_frags
++;
3880 if (pos
+ size
<= offset
+ len
) {
3885 skb_frag_size_sub(nskb_frag
, pos
+ size
- (offset
+ len
));
3893 nskb
->data_len
= len
- hsize
;
3894 nskb
->len
+= nskb
->data_len
;
3895 nskb
->truesize
+= nskb
->data_len
;
3899 if (skb_has_shared_frag(nskb
) &&
3900 __skb_linearize(nskb
))
3903 if (!nskb
->remcsum_offload
)
3904 nskb
->ip_summed
= CHECKSUM_NONE
;
3905 SKB_GSO_CB(nskb
)->csum
=
3906 skb_checksum(nskb
, doffset
,
3907 nskb
->len
- doffset
, 0);
3908 SKB_GSO_CB(nskb
)->csum_start
=
3909 skb_headroom(nskb
) + doffset
;
3911 } while ((offset
+= len
) < head_skb
->len
);
3913 /* Some callers want to get the end of the list.
3914 * Put it in segs->prev to avoid walking the list.
3915 * (see validate_xmit_skb_list() for example)
3920 struct sk_buff
*iter
;
3921 int type
= skb_shinfo(head_skb
)->gso_type
;
3922 unsigned short gso_size
= skb_shinfo(head_skb
)->gso_size
;
3924 /* Update type to add partial and then remove dodgy if set */
3925 type
|= (features
& NETIF_F_GSO_PARTIAL
) / NETIF_F_GSO_PARTIAL
* SKB_GSO_PARTIAL
;
3926 type
&= ~SKB_GSO_DODGY
;
3928 /* Update GSO info and prepare to start updating headers on
3929 * our way back down the stack of protocols.
3931 for (iter
= segs
; iter
; iter
= iter
->next
) {
3932 skb_shinfo(iter
)->gso_size
= gso_size
;
3933 skb_shinfo(iter
)->gso_segs
= partial_segs
;
3934 skb_shinfo(iter
)->gso_type
= type
;
3935 SKB_GSO_CB(iter
)->data_offset
= skb_headroom(iter
) + doffset
;
3938 if (tail
->len
- doffset
<= gso_size
)
3939 skb_shinfo(tail
)->gso_size
= 0;
3940 else if (tail
!= segs
)
3941 skb_shinfo(tail
)->gso_segs
= DIV_ROUND_UP(tail
->len
- doffset
, gso_size
);
3944 /* Following permits correct backpressure, for protocols
3945 * using skb_set_owner_w().
 * Idea is to transfer ownership from head_skb to last segment.
3948 if (head_skb
->destructor
== sock_wfree
) {
3949 swap(tail
->truesize
, head_skb
->truesize
);
3950 swap(tail
->destructor
, head_skb
->destructor
);
3951 swap(tail
->sk
, head_skb
->sk
);
3956 kfree_skb_list(segs
);
3957 return ERR_PTR(err
);
3959 EXPORT_SYMBOL_GPL(skb_segment
);
3961 int skb_gro_receive(struct sk_buff
*p
, struct sk_buff
*skb
)
3963 struct skb_shared_info
*pinfo
, *skbinfo
= skb_shinfo(skb
);
3964 unsigned int offset
= skb_gro_offset(skb
);
3965 unsigned int headlen
= skb_headlen(skb
);
3966 unsigned int len
= skb_gro_len(skb
);
3967 unsigned int delta_truesize
;
3970 if (unlikely(p
->len
+ len
>= 65536 || NAPI_GRO_CB(skb
)->flush
))
3973 lp
= NAPI_GRO_CB(p
)->last
;
3974 pinfo
= skb_shinfo(lp
);
3976 if (headlen
<= offset
) {
3979 int i
= skbinfo
->nr_frags
;
3980 int nr_frags
= pinfo
->nr_frags
+ i
;
3982 if (nr_frags
> MAX_SKB_FRAGS
)
3986 pinfo
->nr_frags
= nr_frags
;
3987 skbinfo
->nr_frags
= 0;
3989 frag
= pinfo
->frags
+ nr_frags
;
3990 frag2
= skbinfo
->frags
+ i
;
3995 frag
->page_offset
+= offset
;
3996 skb_frag_size_sub(frag
, offset
);
3998 /* all fragments truesize : remove (head size + sk_buff) */
3999 delta_truesize
= skb
->truesize
-
4000 SKB_TRUESIZE(skb_end_offset(skb
));
4002 skb
->truesize
-= skb
->data_len
;
4003 skb
->len
-= skb
->data_len
;
4006 NAPI_GRO_CB(skb
)->free
= NAPI_GRO_FREE
;
4008 } else if (skb
->head_frag
) {
4009 int nr_frags
= pinfo
->nr_frags
;
4010 skb_frag_t
*frag
= pinfo
->frags
+ nr_frags
;
4011 struct page
*page
= virt_to_head_page(skb
->head
);
4012 unsigned int first_size
= headlen
- offset
;
4013 unsigned int first_offset
;
4015 if (nr_frags
+ 1 + skbinfo
->nr_frags
> MAX_SKB_FRAGS
)
4018 first_offset
= skb
->data
-
4019 (unsigned char *)page_address(page
) +
4022 pinfo
->nr_frags
= nr_frags
+ 1 + skbinfo
->nr_frags
;
4024 frag
->page
.p
= page
;
4025 frag
->page_offset
= first_offset
;
4026 skb_frag_size_set(frag
, first_size
);
4028 memcpy(frag
+ 1, skbinfo
->frags
, sizeof(*frag
) * skbinfo
->nr_frags
);
4029 /* We dont need to clear skbinfo->nr_frags here */
4031 delta_truesize
= skb
->truesize
- SKB_DATA_ALIGN(sizeof(struct sk_buff
));
4032 NAPI_GRO_CB(skb
)->free
= NAPI_GRO_FREE_STOLEN_HEAD
;
4037 delta_truesize
= skb
->truesize
;
4038 if (offset
> headlen
) {
4039 unsigned int eat
= offset
- headlen
;
4041 skbinfo
->frags
[0].page_offset
+= eat
;
4042 skb_frag_size_sub(&skbinfo
->frags
[0], eat
);
4043 skb
->data_len
-= eat
;
4048 __skb_pull(skb
, offset
);
4050 if (NAPI_GRO_CB(p
)->last
== p
)
4051 skb_shinfo(p
)->frag_list
= skb
;
4053 NAPI_GRO_CB(p
)->last
->next
= skb
;
4054 NAPI_GRO_CB(p
)->last
= skb
;
4055 __skb_header_release(skb
);
4059 NAPI_GRO_CB(p
)->count
++;
4061 p
->truesize
+= delta_truesize
;
4064 lp
->data_len
+= len
;
4065 lp
->truesize
+= delta_truesize
;
4068 NAPI_GRO_CB(skb
)->same_flow
= 1;
4071 EXPORT_SYMBOL_GPL(skb_gro_receive
);
4073 #ifdef CONFIG_SKB_EXTENSIONS
4074 #define SKB_EXT_ALIGN_VALUE 8
4075 #define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
4077 static const u8 skb_ext_type_len
[] = {
4078 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4079 [SKB_EXT_BRIDGE_NF
] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info
),
4082 [SKB_EXT_SEC_PATH
] = SKB_EXT_CHUNKSIZEOF(struct sec_path
),
4086 static __always_inline
unsigned int skb_ext_total_length(void)
4088 return SKB_EXT_CHUNKSIZEOF(struct skb_ext
) +
4089 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4090 skb_ext_type_len
[SKB_EXT_BRIDGE_NF
] +
4093 skb_ext_type_len
[SKB_EXT_SEC_PATH
] +
4098 static void skb_extensions_init(void)
4100 BUILD_BUG_ON(SKB_EXT_NUM
>= 8);
4101 BUILD_BUG_ON(skb_ext_total_length() > 255);
4103 skbuff_ext_cache
= kmem_cache_create("skbuff_ext_cache",
4104 SKB_EXT_ALIGN_VALUE
* skb_ext_total_length(),
4106 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
4110 static void skb_extensions_init(void) {}
4113 void __init
skb_init(void)
4115 skbuff_head_cache
= kmem_cache_create_usercopy("skbuff_head_cache",
4116 sizeof(struct sk_buff
),
4118 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
4119 offsetof(struct sk_buff
, cb
),
4120 sizeof_field(struct sk_buff
, cb
),
4122 skbuff_fclone_cache
= kmem_cache_create("skbuff_fclone_cache",
4123 sizeof(struct sk_buff_fclones
),
4125 SLAB_HWCACHE_ALIGN
|SLAB_PANIC
,
4127 skb_extensions_init();
4131 __skb_to_sgvec(struct sk_buff
*skb
, struct scatterlist
*sg
, int offset
, int len
,
4132 unsigned int recursion_level
)
4134 int start
= skb_headlen(skb
);
4135 int i
, copy
= start
- offset
;
4136 struct sk_buff
*frag_iter
;
4139 if (unlikely(recursion_level
>= 24))
4145 sg_set_buf(sg
, skb
->data
+ offset
, copy
);
4147 if ((len
-= copy
) == 0)
4152 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
4155 WARN_ON(start
> offset
+ len
);
4157 end
= start
+ skb_frag_size(&skb_shinfo(skb
)->frags
[i
]);
4158 if ((copy
= end
- offset
) > 0) {
4159 skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
4160 if (unlikely(elt
&& sg_is_last(&sg
[elt
- 1])))
4165 sg_set_page(&sg
[elt
], skb_frag_page(frag
), copy
,
4166 frag
->page_offset
+offset
-start
);
4175 skb_walk_frags(skb
, frag_iter
) {
4178 WARN_ON(start
> offset
+ len
);
4180 end
= start
+ frag_iter
->len
;
4181 if ((copy
= end
- offset
) > 0) {
4182 if (unlikely(elt
&& sg_is_last(&sg
[elt
- 1])))
4187 ret
= __skb_to_sgvec(frag_iter
, sg
+elt
, offset
- start
,
4188 copy
, recursion_level
+ 1);
4189 if (unlikely(ret
< 0))
4192 if ((len
-= copy
) == 0)
/**
 *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 *	@skb: Socket buffer containing the buffers to be mapped
 *	@sg: The scatter-gather list to map into
 *	@offset: The offset into the buffer's contents to start mapping
 *	@len: Length of buffer space to be mapped
 *
 *	Fill the specified scatter-gather list with mappings/pointers into a
 *	region of the buffer space attached to a socket buffer. Returns either
 *	the number of scatterlist items used, or -EMSGSIZE if the contents
 *	could not fit.
 */
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);

	if (nsg <= 0)
		return nsg;

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
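
/* Illustrative usage sketch (compiled out, not part of this file's API):
 * mapping an skb into a scatterlist before handing it to a crypto or DMA
 * interface. The helper name is invented; sizing the table as
 * MAX_SKB_FRAGS + 2 (head + frags + a possible frag_list tail) is an
 * assumption, and the real bound is the caller's responsibility.
 */
#if 0
static int example_skb_to_sg(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	int nsg;

	sg_init_table(sg, ARRAY_SIZE(sg));

	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
	if (nsg < 0)
		return nsg;	/* -EMSGSIZE if the skb did not fit */

	/* sg[0..nsg-1] now reference the skb's head and fragment pages. */
	return nsg;
}
#endif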
/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
 * given sglist without marking the sg entry that contains the last skb data
 * as the end. So the caller can manipulate the sg list as it wishes when
 * padding new data after the first call, without calling sg_unmark_end to
 * extend the sg list.
 *
 * Scenario to use skb_to_sgvec_nomark:
 * 1. sg_init_table
 * 2. skb_to_sgvec_nomark(payload1)
 * 3. skb_to_sgvec_nomark(payload2)
 *
 * This is equivalent to:
 * 1. sg_init_table
 * 2. skb_to_sgvec(payload1)
 * 3. sg_unmark_end
 * 4. skb_to_sgvec(payload2)
 *
 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
 * is preferable.
 */
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
			int offset, int len)
{
	return __skb_to_sgvec(skb, sg, offset, len, 0);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
4256 * skb_cow_data - Check that a socket buffer's data buffers are writable
4257 * @skb: The socket buffer to check.
4258 * @tailbits: Amount of trailing space to be added
4259 * @trailer: Returned pointer to the skb where the @tailbits space begins
4261 * Make sure that the data buffers attached to a socket buffer are
4262 * writable. If they are not, private copies are made of the data buffers
4263 * and the socket buffer is set to use these instead.
4265 * If @tailbits is given, make sure that there is space to write @tailbits
4266 * bytes of data beyond current end of socket buffer. @trailer will be
4267 * set to point to the skb in which this space begins.
4269 * The number of scatterlist elements required to completely map the
4270 * COW'd and extended socket buffer will be returned.
4272 int skb_cow_data(struct sk_buff
*skb
, int tailbits
, struct sk_buff
**trailer
)
4276 struct sk_buff
*skb1
, **skb_p
;
4278 /* If skb is cloned or its head is paged, reallocate
4279 * head pulling out all the pages (pages are considered not writable
4280 * at the moment even if they are anonymous).
4282 if ((skb_cloned(skb
) || skb_shinfo(skb
)->nr_frags
) &&
4283 __pskb_pull_tail(skb
, skb_pagelen(skb
)-skb_headlen(skb
)) == NULL
)
4286 /* Easy case. Most of packets will go this way. */
4287 if (!skb_has_frag_list(skb
)) {
4288 /* A little of trouble, not enough of space for trailer.
4289 * This should not happen, when stack is tuned to generate
4290 * good frames. OK, on miss we reallocate and reserve even more
4291 * space, 128 bytes is fair. */
4293 if (skb_tailroom(skb
) < tailbits
&&
4294 pskb_expand_head(skb
, 0, tailbits
-skb_tailroom(skb
)+128, GFP_ATOMIC
))
4302 /* Misery. We are in troubles, going to mincer fragments... */
4305 skb_p
= &skb_shinfo(skb
)->frag_list
;
4308 while ((skb1
= *skb_p
) != NULL
) {
4311 /* The fragment is partially pulled by someone,
4312 * this can happen on input. Copy it and everything
4315 if (skb_shared(skb1
))
4318 /* If the skb is the last, worry about trailer. */
4320 if (skb1
->next
== NULL
&& tailbits
) {
4321 if (skb_shinfo(skb1
)->nr_frags
||
4322 skb_has_frag_list(skb1
) ||
4323 skb_tailroom(skb1
) < tailbits
)
4324 ntail
= tailbits
+ 128;
4330 skb_shinfo(skb1
)->nr_frags
||
4331 skb_has_frag_list(skb1
)) {
4332 struct sk_buff
*skb2
;
4334 /* Fuck, we are miserable poor guys... */
4336 skb2
= skb_copy(skb1
, GFP_ATOMIC
);
4338 skb2
= skb_copy_expand(skb1
,
4342 if (unlikely(skb2
== NULL
))
4346 skb_set_owner_w(skb2
, skb1
->sk
);
4348 /* Looking around. Are we still alive?
4349 * OK, link new skb, drop old one */
4351 skb2
->next
= skb1
->next
;
4358 skb_p
= &skb1
->next
;
4363 EXPORT_SYMBOL_GPL(skb_cow_data
);
static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

static void skb_set_err_queue(struct sk_buff *skb)
{
	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
	 * So, it is safe to (mis)use it to mark skbs on the error queue.
	 */
	skb->pkt_type = PACKET_OUTGOING;
	BUILD_BUG_ON(PACKET_OUTGOING == 0);
}
/*
 * Note: We dont mem charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf)
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	skb_set_err_queue(skb);

	/* before exiting rcu section, make sure dst is refcounted */
	skb_dst_force(skb);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_error_report(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
static bool is_icmp_err_skb(const struct sk_buff *skb)
{
	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
}
4412 struct sk_buff
*sock_dequeue_err_skb(struct sock
*sk
)
4414 struct sk_buff_head
*q
= &sk
->sk_error_queue
;
4415 struct sk_buff
*skb
, *skb_next
= NULL
;
4416 bool icmp_next
= false;
4417 unsigned long flags
;
4419 spin_lock_irqsave(&q
->lock
, flags
);
4420 skb
= __skb_dequeue(q
);
4421 if (skb
&& (skb_next
= skb_peek(q
))) {
4422 icmp_next
= is_icmp_err_skb(skb_next
);
4424 sk
->sk_err
= SKB_EXT_ERR(skb_next
)->ee
.ee_origin
;
4426 spin_unlock_irqrestore(&q
->lock
, flags
);
4428 if (is_icmp_err_skb(skb
) && !icmp_next
)
4432 sk
->sk_error_report(sk
);
4436 EXPORT_SYMBOL(sock_dequeue_err_skb
);
4439 * skb_clone_sk - create clone of skb, and take reference to socket
4440 * @skb: the skb to clone
4442 * This function creates a clone of a buffer that holds a reference on
4443 * sk_refcnt. Buffers created via this function are meant to be
4444 * returned using sock_queue_err_skb, or free via kfree_skb.
4446 * When passing buffers allocated with this function to sock_queue_err_skb
4447 * it is necessary to wrap the call with sock_hold/sock_put in order to
4448 * prevent the socket from being released prior to being enqueued on
4449 * the sk_error_queue.
4451 struct sk_buff
*skb_clone_sk(struct sk_buff
*skb
)
4453 struct sock
*sk
= skb
->sk
;
4454 struct sk_buff
*clone
;
4456 if (!sk
|| !refcount_inc_not_zero(&sk
->sk_refcnt
))
4459 clone
= skb_clone(skb
, GFP_ATOMIC
);
4466 clone
->destructor
= sock_efree
;
4470 EXPORT_SYMBOL(skb_clone_sk
);
4472 static void __skb_complete_tx_timestamp(struct sk_buff
*skb
,
4477 struct sock_exterr_skb
*serr
;
4480 BUILD_BUG_ON(sizeof(struct sock_exterr_skb
) > sizeof(skb
->cb
));
4482 serr
= SKB_EXT_ERR(skb
);
4483 memset(serr
, 0, sizeof(*serr
));
4484 serr
->ee
.ee_errno
= ENOMSG
;
4485 serr
->ee
.ee_origin
= SO_EE_ORIGIN_TIMESTAMPING
;
4486 serr
->ee
.ee_info
= tstype
;
4487 serr
->opt_stats
= opt_stats
;
4488 serr
->header
.h4
.iif
= skb
->dev
? skb
->dev
->ifindex
: 0;
4489 if (sk
->sk_tsflags
& SOF_TIMESTAMPING_OPT_ID
) {
4490 serr
->ee
.ee_data
= skb_shinfo(skb
)->tskey
;
4491 if (sk
->sk_protocol
== IPPROTO_TCP
&&
4492 sk
->sk_type
== SOCK_STREAM
)
4493 serr
->ee
.ee_data
-= sk
->sk_tskey
;
4496 err
= sock_queue_err_skb(sk
, skb
);
4502 static bool skb_may_tx_timestamp(struct sock
*sk
, bool tsonly
)
4506 if (likely(sysctl_tstamp_allow_data
|| tsonly
))
4509 read_lock_bh(&sk
->sk_callback_lock
);
4510 ret
= sk
->sk_socket
&& sk
->sk_socket
->file
&&
4511 file_ns_capable(sk
->sk_socket
->file
, &init_user_ns
, CAP_NET_RAW
);
4512 read_unlock_bh(&sk
->sk_callback_lock
);
4516 void skb_complete_tx_timestamp(struct sk_buff
*skb
,
4517 struct skb_shared_hwtstamps
*hwtstamps
)
4519 struct sock
*sk
= skb
->sk
;
4521 if (!skb_may_tx_timestamp(sk
, false))
4524 /* Take a reference to prevent skb_orphan() from freeing the socket,
4525 * but only if the socket refcount is not zero.
4527 if (likely(refcount_inc_not_zero(&sk
->sk_refcnt
))) {
4528 *skb_hwtstamps(skb
) = *hwtstamps
;
4529 __skb_complete_tx_timestamp(skb
, sk
, SCM_TSTAMP_SND
, false);
4537 EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp
);
4539 void __skb_tstamp_tx(struct sk_buff
*orig_skb
,
4540 struct skb_shared_hwtstamps
*hwtstamps
,
4541 struct sock
*sk
, int tstype
)
4543 struct sk_buff
*skb
;
4544 bool tsonly
, opt_stats
= false;
4549 if (!hwtstamps
&& !(sk
->sk_tsflags
& SOF_TIMESTAMPING_OPT_TX_SWHW
) &&
4550 skb_shinfo(orig_skb
)->tx_flags
& SKBTX_IN_PROGRESS
)
4553 tsonly
= sk
->sk_tsflags
& SOF_TIMESTAMPING_OPT_TSONLY
;
4554 if (!skb_may_tx_timestamp(sk
, tsonly
))
4559 if ((sk
->sk_tsflags
& SOF_TIMESTAMPING_OPT_STATS
) &&
4560 sk
->sk_protocol
== IPPROTO_TCP
&&
4561 sk
->sk_type
== SOCK_STREAM
) {
4562 skb
= tcp_get_timestamping_opt_stats(sk
);
4566 skb
= alloc_skb(0, GFP_ATOMIC
);
4568 skb
= skb_clone(orig_skb
, GFP_ATOMIC
);
4574 skb_shinfo(skb
)->tx_flags
|= skb_shinfo(orig_skb
)->tx_flags
&
4576 skb_shinfo(skb
)->tskey
= skb_shinfo(orig_skb
)->tskey
;
4580 *skb_hwtstamps(skb
) = *hwtstamps
;
4582 skb
->tstamp
= ktime_get_real();
4584 __skb_complete_tx_timestamp(skb
, sk
, tstype
, opt_stats
);
4586 EXPORT_SYMBOL_GPL(__skb_tstamp_tx
);
4588 void skb_tstamp_tx(struct sk_buff
*orig_skb
,
4589 struct skb_shared_hwtstamps
*hwtstamps
)
4591 return __skb_tstamp_tx(orig_skb
, hwtstamps
, orig_skb
->sk
,
4594 EXPORT_SYMBOL_GPL(skb_tstamp_tx
);
4596 void skb_complete_wifi_ack(struct sk_buff
*skb
, bool acked
)
4598 struct sock
*sk
= skb
->sk
;
4599 struct sock_exterr_skb
*serr
;
4602 skb
->wifi_acked_valid
= 1;
4603 skb
->wifi_acked
= acked
;
4605 serr
= SKB_EXT_ERR(skb
);
4606 memset(serr
, 0, sizeof(*serr
));
4607 serr
->ee
.ee_errno
= ENOMSG
;
4608 serr
->ee
.ee_origin
= SO_EE_ORIGIN_TXSTATUS
;
4610 /* Take a reference to prevent skb_orphan() from freeing the socket,
4611 * but only if the socket refcount is not zero.
4613 if (likely(refcount_inc_not_zero(&sk
->sk_refcnt
))) {
4614 err
= sock_queue_err_skb(sk
, skb
);
4620 EXPORT_SYMBOL_GPL(skb_complete_wifi_ack
);
4623 * skb_partial_csum_set - set up and verify partial csum values for packet
4624 * @skb: the skb to set
4625 * @start: the number of bytes after skb->data to start checksumming.
4626 * @off: the offset from start to place the checksum.
4628 * For untrusted partially-checksummed packets, we need to make sure the values
4629 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4631 * This function checks and sets those values and skb->ip_summed: if this
4632 * returns false you should drop the packet.
4634 bool skb_partial_csum_set(struct sk_buff
*skb
, u16 start
, u16 off
)
4636 u32 csum_end
= (u32
)start
+ (u32
)off
+ sizeof(__sum16
);
4637 u32 csum_start
= skb_headroom(skb
) + (u32
)start
;
4639 if (unlikely(csum_start
> U16_MAX
|| csum_end
> skb_headlen(skb
))) {
4640 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
4641 start
, off
, skb_headroom(skb
), skb_headlen(skb
));
4644 skb
->ip_summed
= CHECKSUM_PARTIAL
;
4645 skb
->csum_start
= csum_start
;
4646 skb
->csum_offset
= off
;
4647 skb_set_transport_header(skb
, start
);
4650 EXPORT_SYMBOL_GPL(skb_partial_csum_set
);
4652 static int skb_maybe_pull_tail(struct sk_buff
*skb
, unsigned int len
,
4655 if (skb_headlen(skb
) >= len
)
4658 /* If we need to pullup then pullup to the max, so we
4659 * won't need to do it again.
4664 if (__pskb_pull_tail(skb
, max
- skb_headlen(skb
)) == NULL
)
4667 if (skb_headlen(skb
) < len
)
4673 #define MAX_TCP_HDR_LEN (15 * 4)
4675 static __sum16
*skb_checksum_setup_ip(struct sk_buff
*skb
,
4676 typeof(IPPROTO_IP
) proto
,
4683 err
= skb_maybe_pull_tail(skb
, off
+ sizeof(struct tcphdr
),
4684 off
+ MAX_TCP_HDR_LEN
);
4685 if (!err
&& !skb_partial_csum_set(skb
, off
,
4686 offsetof(struct tcphdr
,
4689 return err
? ERR_PTR(err
) : &tcp_hdr(skb
)->check
;
4692 err
= skb_maybe_pull_tail(skb
, off
+ sizeof(struct udphdr
),
4693 off
+ sizeof(struct udphdr
));
4694 if (!err
&& !skb_partial_csum_set(skb
, off
,
4695 offsetof(struct udphdr
,
4698 return err
? ERR_PTR(err
) : &udp_hdr(skb
)->check
;
4701 return ERR_PTR(-EPROTO
);
4704 /* This value should be large enough to cover a tagged ethernet header plus
4705 * maximally sized IP and TCP or UDP headers.
4707 #define MAX_IP_HDR_LEN 128
4709 static int skb_checksum_setup_ipv4(struct sk_buff
*skb
, bool recalculate
)
4718 err
= skb_maybe_pull_tail(skb
,
4719 sizeof(struct iphdr
),
4724 if (ip_hdr(skb
)->frag_off
& htons(IP_OFFSET
| IP_MF
))
4727 off
= ip_hdrlen(skb
);
4734 csum
= skb_checksum_setup_ip(skb
, ip_hdr(skb
)->protocol
, off
);
4736 return PTR_ERR(csum
);
4739 *csum
= ~csum_tcpudp_magic(ip_hdr(skb
)->saddr
,
4742 ip_hdr(skb
)->protocol
, 0);
4749 /* This value should be large enough to cover a tagged ethernet header plus
4750 * an IPv6 header, all options, and a maximal TCP or UDP header.
4752 #define MAX_IPV6_HDR_LEN 256
4754 #define OPT_HDR(type, skb, off) \
4755 (type *)(skb_network_header(skb) + (off))
4757 static int skb_checksum_setup_ipv6(struct sk_buff
*skb
, bool recalculate
)
4770 off
= sizeof(struct ipv6hdr
);
4772 err
= skb_maybe_pull_tail(skb
, off
, MAX_IPV6_HDR_LEN
);
4776 nexthdr
= ipv6_hdr(skb
)->nexthdr
;
4778 len
= sizeof(struct ipv6hdr
) + ntohs(ipv6_hdr(skb
)->payload_len
);
4779 while (off
<= len
&& !done
) {
4781 case IPPROTO_DSTOPTS
:
4782 case IPPROTO_HOPOPTS
:
4783 case IPPROTO_ROUTING
: {
4784 struct ipv6_opt_hdr
*hp
;
4786 err
= skb_maybe_pull_tail(skb
,
4788 sizeof(struct ipv6_opt_hdr
),
4793 hp
= OPT_HDR(struct ipv6_opt_hdr
, skb
, off
);
4794 nexthdr
= hp
->nexthdr
;
4795 off
+= ipv6_optlen(hp
);
4799 struct ip_auth_hdr
*hp
;
4801 err
= skb_maybe_pull_tail(skb
,
4803 sizeof(struct ip_auth_hdr
),
4808 hp
= OPT_HDR(struct ip_auth_hdr
, skb
, off
);
4809 nexthdr
= hp
->nexthdr
;
4810 off
+= ipv6_authlen(hp
);
4813 case IPPROTO_FRAGMENT
: {
4814 struct frag_hdr
*hp
;
4816 err
= skb_maybe_pull_tail(skb
,
4818 sizeof(struct frag_hdr
),
4823 hp
= OPT_HDR(struct frag_hdr
, skb
, off
);
4825 if (hp
->frag_off
& htons(IP6_OFFSET
| IP6_MF
))
4828 nexthdr
= hp
->nexthdr
;
4829 off
+= sizeof(struct frag_hdr
);
4840 if (!done
|| fragment
)
4843 csum
= skb_checksum_setup_ip(skb
, nexthdr
, off
);
4845 return PTR_ERR(csum
);
4848 *csum
= ~csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
,
4849 &ipv6_hdr(skb
)->daddr
,
4850 skb
->len
- off
, nexthdr
, 0);
/**
 * skb_checksum_setup - set up partial checksum offset
 * @skb: the skb to set up
 * @recalculate: if true the pseudo-header checksum will be recalculated
 */
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		err = skb_checksum_setup_ipv4(skb, recalculate);
		break;

	case htons(ETH_P_IPV6):
		err = skb_checksum_setup_ipv6(skb, recalculate);
		break;

	default:
		err = -EPROTO;
		break;
	}

	return err;
}
EXPORT_SYMBOL(skb_checksum_setup);

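/* Usage sketch (illustrative only, not part of the original sources): a
 * receive path that trusts a device-supplied partial-checksum hint could
 * recompute the pseudo-header checksum before handing the packet up; the
 * wrapper name below is hypothetical.
 *
 *	static int example_rx_fixup_csum(struct sk_buff *skb)
 *	{
 *		return skb_checksum_setup(skb, true);
 *	}
 */
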
/**
 * skb_checksum_maybe_trim - maybe trims the given skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 *
 * Checks whether the given skb has data beyond the given transport length.
 * If so, returns a cloned skb trimmed to this transport length.
 * Otherwise returns the provided skb. Returns NULL in error cases
 * (e.g. transport_len exceeds skb length or out-of-memory).
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
					       unsigned int transport_len)
{
	struct sk_buff *skb_chk;
	unsigned int len = skb_transport_offset(skb) + transport_len;
	int ret;

	if (skb->len < len)
		return NULL;
	else if (skb->len == len)
		return skb;

	skb_chk = skb_clone(skb, GFP_ATOMIC);
	if (!skb_chk)
		return NULL;

	ret = pskb_trim_rcsum(skb_chk, len);
	if (ret) {
		kfree_skb(skb_chk);
		return NULL;
	}

	return skb_chk;
}

/**
 * skb_checksum_trimmed - validate checksum of an skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 * @skb_chkf: checksum function to use
 *
 * Applies the given checksum function skb_chkf to the provided skb.
 * Returns a checked and maybe trimmed skb. Returns NULL on error.
 *
 * If the skb has data beyond the given transport length, then a
 * trimmed & cloned skb is checked and returned.
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb))
{
	struct sk_buff *skb_chk;
	unsigned int offset = skb_transport_offset(skb);
	__sum16 ret;

	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
	if (!skb_chk)
		goto err;

	if (!pskb_may_pull(skb_chk, offset))
		goto err;

	skb_pull_rcsum(skb_chk, offset);
	ret = skb_chkf(skb_chk);
	skb_push_rcsum(skb_chk, offset);

	if (ret)
		goto err;

	return skb_chk;

err:
	if (skb_chk && skb_chk != skb)
		kfree_skb(skb_chk);

	return NULL;
}
EXPORT_SYMBOL(skb_checksum_trimmed);

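/* Usage sketch (illustrative only, not part of the original sources):
 * validating a payload of known transport length, then dropping the temporary
 * clone if one was created; "example_csum" stands in for a real __sum16
 * checksum routine.
 *
 *	struct sk_buff *skb_chk;
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, example_csum);
 *	if (!skb_chk)
 *		return -EINVAL;
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);
 */
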
void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);

/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
 * @delta_truesize: how much more was allocated than was requested
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	struct skb_shared_info *to_shinfo, *from_shinfo;
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	to_shinfo = skb_shinfo(to);
	from_shinfo = skb_shinfo(from);
	if (to_shinfo->frag_list || from_shinfo->frag_list)
		return false;
	if (skb_zcopy(to) || skb_zcopy(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (to_shinfo->nr_frags +
		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, to_shinfo->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (to_shinfo->nr_frags +
		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
	       from_shinfo->frags,
	       from_shinfo->nr_frags * sizeof(skb_frag_t));
	to_shinfo->nr_frags += from_shinfo->nr_frags;

	if (!skb_cloned(from))
		from_shinfo->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < from_shinfo->nr_frags; i++)
		__skb_frag_ref(&from_shinfo->frags[i]);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);

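/* Usage sketch (illustrative only, not part of the original sources): receive
 * queues typically try to merge a freshly received skb into the previous one,
 * free the donor with kfree_skb_partial(), and account the returned truesize
 * delta against the socket; "tail" names the prior buffer here.
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(tail, skb, &fragstolen, &delta))
 *		kfree_skb_partial(skb, fragstolen);
 */
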
/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean a skb before injecting it in
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	skb_ext_reset(skb);
	nf_reset_ct(skb);
	nf_reset_trace(skb);

#ifdef CONFIG_NET_SWITCHDEV
	skb->offload_fwd_mark = 0;
	skb->offload_l3_fwd_mark = 0;
#endif

	if (!xnet)
		return;

	ipvs_reset(skb);
	skb->mark = 0;
	skb->tstamp = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);

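/* Usage sketch (illustrative only, not part of the original sources): a tunnel
 * receive path that re-injects a decapsulated packet into another namespace
 * would scrub it first; "crossed_netns" is a hypothetical flag.
 *
 *	skb_scrub_packet(skb, crossed_netns);
 *	netif_rx(skb);
 */
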
/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	} else if (unlikely(skb_is_gso_sctp(skb))) {
		thlen = sizeof(struct sctphdr);
	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
		thlen = sizeof(struct udphdr);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_mac_seglen is used to determine the real size of the
 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
 * headers (TCP/UDP).
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
 *
 * There are a couple of instances where we have a GSO skb, and we
 * want to determine what size it would be after it is segmented.
 *
 * We might want to check:
 * - L3+L4+payload size (e.g. IP forwarding)
 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
 *
 * This is a helper to do that correctly considering GSO_BY_FRAGS.
 *
 * @skb: GSO skb
 *
 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
 *
 * @max_len: The maximum permissible length.
 *
 * Returns true if the segmented length <= max length.
 */
static inline bool skb_gso_size_check(const struct sk_buff *skb,
				      unsigned int seg_len,
				      unsigned int max_len) {
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const struct sk_buff *iter;

	if (shinfo->gso_size != GSO_BY_FRAGS)
		return seg_len <= max_len;

	/* Undo this so we can re-use header sizes */
	seg_len -= GSO_BY_FRAGS;

	/* at the end of this loop we need to have checked all the frags */
	skb_walk_frags(skb, iter) {
		if (seg_len + skb_headlen(iter) > max_len)
			return false;
	}

	return true;
}

/**
 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
 *
 * @skb: GSO skb
 * @mtu: MTU to validate against
 *
 * skb_gso_validate_network_len validates if a given skb will fit a
 * wanted MTU once split. It considers L3 headers, L4 headers, and the
 * payload.
 */
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
{
	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);

/**
 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
 *
 * @skb: GSO skb
 * @len: length to validate against
 *
 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
 * length once split, including L2, L3 and L4 headers and the payload.
 */
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
{
	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);

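/* Usage sketch (illustrative only, not part of the original sources): an IP
 * forwarding path can reject a GSO packet whose segments would not fit the
 * egress MTU, instead of segmenting it only to have the pieces dropped; the
 * label below is hypothetical.
 *
 *	if (skb_is_gso(skb) &&
 *	    !skb_gso_validate_network_len(skb, mtu))
 *		goto send_frag_needed;
 */
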
static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	int mac_len, meta_len;
	void *meta;

	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	mac_len = skb->data - skb_mac_header(skb);
	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
			mac_len - VLAN_HLEN - ETH_TLEN);
	}

	meta_len = skb_metadata_len(skb);
	if (meta_len) {
		meta = skb_metadata_end(skb) - meta_len;
		memmove(meta + VLAN_HLEN, meta, meta_len);
	}

	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);

int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);

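/* Usage sketch (illustrative only, not part of the original sources): before
 * rewriting a header in place, callers make the first write_len bytes private;
 * here the Ethernet header is made writable prior to a destination MAC
 * rewrite. "new_dest" is hypothetical.
 *
 *	err = skb_ensure_writable(skb, ETH_HLEN);
 *	if (unlikely(err))
 *		return err;
 *	ether_addr_copy(eth_hdr(skb)->h_dest, new_dest);
 */
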
/* remove VLAN header from packet and update csum accordingly.
 * expects a non skb_vlan_tag_present skb with a vlan tag payload
 */
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	int offset = skb->data - skb_mac_header(skb);
	int err;

	if (WARN_ONCE(offset,
		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
		      offset)) {
		return -EINVAL;
	}

	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);

	return err;
}
EXPORT_SYMBOL(__skb_vlan_pop);

/* Pop a vlan tag either from hwaccel or from payload.
 * Expects skb->data at mac header.
 */
int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		__vlan_hwaccel_clear_tag(skb);
	} else {
		if (unlikely(!eth_type_vlan(skb->protocol)))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(!eth_type_vlan(skb->protocol)))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);

/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
 * Expects skb->data at mac header.
 */
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		int offset = skb->data - skb_mac_header(skb);
		int err;

		if (WARN_ONCE(offset,
			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
			      offset)) {
			return -EINVAL;
		}

		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;

		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;

		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);

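/* Usage sketch (illustrative only, not part of the original sources): an
 * act_vlan/openvswitch style action could replace the outermost VLAN tag by
 * popping it and pushing a new one; the TPID and TCI values below are made up.
 *
 *	err = skb_vlan_pop(skb);
 *	if (!err)
 *		err = skb_vlan_push(skb, htons(ETH_P_8021Q), 100);
 */
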
/* Update the ethertype of hdr and the skb csum value if required. */
static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~hdr->h_proto, ethertype };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	hdr->h_proto = ethertype;
}

/**
 * skb_mpls_push() - push a new MPLS header after the mac header
 *
 * @skb: buffer
 * @mpls_lse: MPLS label stack entry to push
 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
{
	struct mpls_shim_hdr *lse;
	int err;

	if (unlikely(!eth_p_mpls(mpls_proto)))
		return -EINVAL;

	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -EINVAL;

	err = skb_cow_head(skb, MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	lse = mpls_hdr(skb);
	lse->label_stack_entry = mpls_lse;
	skb_postpush_rcsum(skb, lse, MPLS_HLEN);

	if (skb->dev && skb->dev->type == ARPHRD_ETHER)
		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
	skb->protocol = mpls_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_push);

/**
 * skb_mpls_pop() - pop the outermost MPLS header
 *
 * @skb: buffer
 * @next_proto: ethertype of header after popped MPLS header
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return 0;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
		struct ethhdr *hdr;

		/* use mpls_hdr() to get ethertype to account for VLANs. */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		skb_mod_eth_type(skb, hdr, next_proto);
	}
	skb->protocol = next_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_pop);

/**
 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
 *
 * @skb: buffer
 * @mpls_lse: new MPLS label stack entry to update to
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	mpls_hdr(skb)->label_stack_entry = mpls_lse;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_update_lse);

/**
 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
 *
 * @skb: buffer
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_dec_ttl(struct sk_buff *skb)
{
	u32 lse;
	u8 ttl;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
	if (!--ttl)
		return -EINVAL;

	lse &= ~MPLS_LS_TTL_MASK;
	lse |= ttl << MPLS_LS_TTL_SHIFT;

	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
}
EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);

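/* Usage sketch (illustrative only, not part of the original sources): an MPLS
 * label-swap step could be built from these helpers by rewriting the outermost
 * label stack entry and then decrementing the TTL; "new_lse" is a hypothetical
 * __be32 value.
 *
 *	err = skb_mpls_update_lse(skb, new_lse);
 *	if (!err)
 *		err = skb_mpls_dec_ttl(skb);
 */
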
/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	int i;

	*errcode = -EMSGSIZE;
	/* Note this test could be relaxed, if we succeed to allocate
	 * high order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_mask);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
						   __GFP_COMP |
						   __GFP_NOWARN,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);

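/* Usage sketch (illustrative only, not part of the original sources):
 * allocating a mostly-paged skb with a small linear header, the way datagram
 * senders build large messages; "headroom" and "payload" are hypothetical
 * sizes.
 *
 *	int errcode;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(headroom, payload, PAGE_ALLOC_COSTLY_ORDER,
 *				   &errcode, GFP_KERNEL);
 *	if (!skb)
 *		return errcode;
 */
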
/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
				    const int headlen, gfp_t gfp_mask)
{
	int i;
	int size = skb_end_offset(skb);
	int new_hlen = headlen - off;
	u8 *data;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy real data, and all frags */
	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
	skb->len -= off;

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info,
			frags[skb_shinfo(skb)->nr_frags]));
	if (skb_cloned(skb)) {
		/* drop the old head gracefully */
		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree(data);
			return -ENOMEM;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);
		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);
		skb_release_data(skb);
	} else {
		/* we can reuse the existing refcount - all we did was
		 * relocate values
		 */
		skb_free_head(skb);
	}

	skb->head = data;
	skb->data = data;
	skb->head_frag = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_set_tail_pointer(skb, skb_headlen(skb));
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	return 0;
}

static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
static int pskb_carve_frag_list(struct sk_buff *skb,
				struct skb_shared_info *shinfo, int eat,
				gfp_t gfp_mask)
{
	struct sk_buff *list = shinfo->frag_list;
	struct sk_buff *clone = NULL;
	struct sk_buff *insp = NULL;

	do {
		if (!list) {
			pr_err("Not enough bytes to eat. Want %d\n", eat);
			return -EFAULT;
		}
		if (list->len <= eat) {
			/* Eaten as whole. */
			eat -= list->len;
			list = list->next;
			insp = list;
		} else {
			/* Eaten partially. */
			if (skb_shared(list)) {
				clone = skb_clone(list, gfp_mask);
				if (!clone)
					return -ENOMEM;
				insp = list->next;
				list = clone;
			} else {
				/* This may be pulled without problems. */
				insp = list;
			}
			if (pskb_carve(list, eat, gfp_mask) < 0) {
				kfree_skb(clone);
				return -ENOMEM;
			}
			break;
		}
	} while (eat);

	/* Free pulled out fragments. */
	while ((list = shinfo->frag_list) != insp) {
		shinfo->frag_list = list->next;
		kfree_skb(list);
	}
	/* And insert new clone at head. */
	if (clone) {
		clone->next = list;
		shinfo->frag_list = clone;
	}
	return 0;
}

/* carve off first len bytes from skb. Split line (off) is in the
 * non-linear part of skb
 */
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
				       int pos, gfp_t gfp_mask)
{
	int i, k = 0;
	int size = skb_end_offset(skb);
	u8 *data;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	struct skb_shared_info *shinfo;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb), offsetof(struct skb_shared_info,
					 frags[skb_shinfo(skb)->nr_frags]));
	if (skb_orphan_frags(skb, gfp_mask)) {
		kfree(data);
		return -ENOMEM;
	}
	shinfo = (struct skb_shared_info *)(data + size);
	for (i = 0; i < nfrags; i++) {
		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + fsize > off) {
			shinfo->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < off) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split is accurately. We make this.
				 */
				shinfo->frags[0].page_offset += off - pos;
				skb_frag_size_sub(&shinfo->frags[0], off - pos);
			}
			skb_frag_ref(skb, i);
			k++;
		}
		pos += fsize;
	}
	shinfo->nr_frags = k;
	if (skb_has_frag_list(skb))
		skb_clone_fraglist(skb);

	if (k == 0) {
		/* split line is in frag list */
		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
	}
	skb_release_data(skb);

	skb->head = data;
	skb->head_frag = 0;
	skb->data = data;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
#else
	skb->end = skb->head + size;
#endif
	skb_reset_tail_pointer(skb);
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	skb->len -= off;
	skb->data_len = skb->len;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;
}

/* remove len bytes from the beginning of the skb */
static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
{
	int headlen = skb_headlen(skb);

	if (len < headlen)
		return pskb_carve_inside_header(skb, len, headlen, gfp);
	else
		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
}

/* Extract to_copy bytes starting at off from skb, and return this in
 * a new skb
 */
struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
			     int to_copy, gfp_t gfp)
{
	struct sk_buff *clone = skb_clone(skb, gfp);

	if (!clone)
		return NULL;

	if (pskb_carve(clone, off, gfp) < 0 ||
	    pskb_trim(clone, to_copy)) {
		kfree_skb(clone);
		return NULL;
	}
	return clone;
}
EXPORT_SYMBOL(pskb_extract);

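/* Usage sketch (illustrative only, not part of the original sources): carving
 * a sub-record out of a larger receive buffer without copying the payload,
 * similar in spirit to how a multi-record consumer could split an skb; "off"
 * and "rec_len" are hypothetical.
 *
 *	struct sk_buff *rec = pskb_extract(skb, off, rec_len, GFP_ATOMIC);
 *
 *	if (!rec)
 *		return -ENOMEM;
 */
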
/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right now and adjust
 * truesize.
 *
 * Notes:
 *	We do not reallocate skb->head thus cannot fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */
void skb_condense(struct sk_buff *skb)
{
	if (skb->data_len) {
		if (skb->data_len > skb->end - skb->tail ||
		    skb_cloned(skb))
			return;

		/* Nice, we can free page frag(s) right now */
		__pskb_pull_tail(skb, skb->data_len);
	}
	/* At this point, skb->truesize might be overestimated,
	 * because skb had a fragment, and fragments do not tell
	 * their truesize.
	 * When we pulled its content into skb->head, fragment
	 * was freed, but __pskb_pull_tail() could not possibly
	 * adjust skb->truesize, not knowing the frag truesize.
	 */
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}

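/* Usage sketch (illustrative only, not part of the original sources): before
 * parking a small packet on a potentially long queue, a caller can pull its
 * frags into the head and re-charge the new, smaller truesize to the socket.
 *
 *	skb_condense(skb);
 *	skb_set_owner_r(skb, sk);
 */
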
#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}

static struct skb_ext *skb_ext_alloc(void)
{
	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);

	if (new) {
		memset(new->offset, 0, sizeof(new->offset));
		refcount_set(&new->refcnt, 1);
	}

	return new;
}

static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
					 unsigned int old_active)
{
	struct skb_ext *new;

	if (refcount_read(&old->refcnt) == 1)
		return old;

	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
	if (!new)
		return NULL;

	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
	refcount_set(&new->refcnt, 1);

#ifdef CONFIG_XFRM
	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
		unsigned int i;

		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
#endif
	__skb_ext_put(old);
	return new;
}

/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of cloned buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
 */
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *new, *old = NULL;
	unsigned int newlen, newoff;

	if (skb->active_extensions) {
		old = skb->extensions;

		new = skb_ext_maybe_cow(old, skb->active_extensions);
		if (!new)
			return NULL;

		if (__skb_ext_exist(new, id))
			goto set_active;

		newoff = new->chunks;
	} else {
		newoff = SKB_EXT_CHUNKSIZEOF(*new);

		new = skb_ext_alloc();
		if (!new)
			return NULL;
	}

	newlen = newoff + skb_ext_type_len[id];
	new->chunks = newlen;
	new->offset[id] = newoff;
set_active:
	skb->extensions = new;
	skb->active_extensions |= 1 << id;
	return skb_ext_get_ptr(new, id);
}
EXPORT_SYMBOL(skb_ext_add);

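/* Usage sketch (illustrative only, not part of the original sources):
 * attaching an extension and filling it in; SKB_EXT_SEC_PATH is used here
 * only because it already appears in this file.
 *
 *	struct sec_path *sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *
 *	if (!sp)
 *		return -ENOMEM;
 *	sp->len = 0;
 */
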
#ifdef CONFIG_XFRM
static void skb_ext_put_sp(struct sec_path *sp)
{
	unsigned int i;

	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
}
#endif

void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *ext = skb->extensions;

	skb->active_extensions &= ~(1 << id);
	if (skb->active_extensions == 0) {
		skb->extensions = NULL;
		__skb_ext_put(ext);
#ifdef CONFIG_XFRM
	} else if (id == SKB_EXT_SEC_PATH &&
		   refcount_read(&ext->refcnt) == 1) {
		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);

		skb_ext_put_sp(sp);
		sp->len = 0;
#endif
	}
}
EXPORT_SYMBOL(__skb_ext_del);

void __skb_ext_put(struct skb_ext *ext)
{
	/* If this is last clone, nothing can increment
	 * it after check passes. Avoids one atomic op.
	 */
	if (refcount_read(&ext->refcnt) == 1)
		goto free_now;

	if (!refcount_dec_and_test(&ext->refcnt))
		return;
free_now:
#ifdef CONFIG_XFRM
	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
#endif

	kmem_cache_free(skbuff_ext_cache, ext);
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */