// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *      Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *      Authors:        Alan Cox <alan@lxorguk.ukuu.org.uk>
 *                      Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *      Fixes:
 *              Alan Cox        :       Fixed the worst of the load
 *                                      balancer bugs.
 *              Dave Platt      :       Interrupt stacking fix.
 *              Richard Kooijman:       Timestamp fixes.
 *              Alan Cox        :       Changed buffer format.
 *              Alan Cox        :       destructor hook for AF_UNIX etc.
 *              Linus Torvalds  :       Better skb_clone.
 *              Alan Cox        :       Added skb_copy.
 *              Alan Cox        :       Added all the changed routines Linus
 *                                      only put in the headers
 *              Ray VanTassle   :       Fixed --skb->lock in free
 *              Alan Cox        :       skb_copy copy arp field
 *              Andi Kleen      :       slabified it.
 *              Robert Olsson   :       Removed skb_head_pool
 *
 *      NOTE:
 *              The __skb_ routines should be called with interrupts
 *      disabled, or you better be *real* sure that the operation is atomic
 *      with respect to whatever list is being frobbed (e.g. via lock_sock()
 *      or via disabling bottom half handlers, etc).
 */
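
/*
 * Illustrative sketch (not part of the original file): the lockless
 * __skb_ queue helpers rely on the caller for serialization, e.g. with
 * a hypothetical struct sk_buff_head *queue:
 *
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&queue->lock, flags);
 *      __skb_queue_head(queue, skb);
 *      spin_unlock_irqrestore(&queue->lock, flags);
 *
 * whereas skb_queue_head(queue, skb) takes queue->lock internally.
 */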

/*
 *      The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *      skb_panic - private function for out-of-line support
 *      @skb: buffer
 *      @sz: size
 *      @addr: address
 *      @msg: skb_over_panic or skb_under_panic
 *
 *      Out-of-line support for skb_put() and skb_push().
 *      Called via the wrapper skb_over_panic() or skb_under_panic().
 *      Keep out of line to prevent kernel bloat.
 *      __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
                      const char msg[])
{
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
                 msg, addr, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
        __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
                               unsigned long ip, bool *pfmemalloc)
{
        void *obj;
        bool ret_pfmemalloc = false;

        /*
         * Try a regular allocation, when that fails and we're not entitled
         * to the reserves, fail.
         */
        obj = kmalloc_node_track_caller(size,
                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                        node);
        if (obj || !(gfp_pfmemalloc_allowed(flags)))
                goto out;

        /* Try again but now we are using pfmemalloc reserves */
        ret_pfmemalloc = true;
        obj = kmalloc_node_track_caller(size, flags, node);

out:
        if (pfmemalloc)
                *pfmemalloc = ret_pfmemalloc;

        return obj;
}
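
/*
 * Caller-side sketch (illustrative, not part of the original file): the
 * returned pfmemalloc flag is propagated to skb->pfmemalloc so that only
 * SOCK_MEMALLOC sockets may later consume the buffer, as __alloc_skb()
 * below does:
 *
 *      bool pfmemalloc;
 *      void *data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
 *
 *      if (!data)
 *              goto nodata;
 *      skb->pfmemalloc = pfmemalloc;
 */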

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 *
 */

/**
 *      __alloc_skb - allocate a network buffer
 *      @size: size to allocate
 *      @gfp_mask: allocation mask
 *      @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *              instead of head cache and allocate a cloned (child) skb.
 *              If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *              allocations in case the data is required for writeback
 *      @node: numa node to allocate memory on
 *
 *      Allocate a new &sk_buff. The returned buffer has no headroom and a
 *      tail room of at least size bytes. The object has a reference count
 *      of one. The return is the buffer. On a failure the return is %NULL.
 *
 *      Buffers may only be allocated from interrupts using a @gfp_mask of
 *      %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
                            int flags, int node)
{
        struct kmem_cache *cache;
        struct skb_shared_info *shinfo;
        struct sk_buff *skb;
        u8 *data;
        bool pfmemalloc;

        cache = (flags & SKB_ALLOC_FCLONE)
                ? skbuff_fclone_cache : skbuff_head_cache;

        if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
                gfp_mask |= __GFP_MEMALLOC;

        /* Get the HEAD */
        skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
        if (!skb)
                goto out;
        prefetchw(skb);

        /* We do our best to align skb_shared_info on a separate cache
         * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
         * aligned memory blocks, unless SLUB/SLAB debug is enabled.
         * Both skb->head and skb_shared_info are cache line aligned.
         */
        size = SKB_DATA_ALIGN(size);
        size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
        if (!data)
                goto nodata;
        /* kmalloc(size) might give us more room than requested.
         * Put skb_shared_info exactly at the end of allocated zone,
         * to allow max possible filling before reallocation.
         */
        size = SKB_WITH_OVERHEAD(ksize(data));
        prefetchw(data + size);

        /*
         * Only clear those fields we need to clear, not those that we will
         * actually initialise below. Hence, don't put any more fields after
         * the tail pointer in struct sk_buff!
         */
        memset(skb, 0, offsetof(struct sk_buff, tail));
        /* Account for allocated memory : skb + skb->head */
        skb->truesize = SKB_TRUESIZE(size);
        skb->pfmemalloc = pfmemalloc;
        refcount_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);

        if (flags & SKB_ALLOC_FCLONE) {
                struct sk_buff_fclones *fclones;

                fclones = container_of(skb, struct sk_buff_fclones, skb1);

                skb->fclone = SKB_FCLONE_ORIG;
                refcount_set(&fclones->fclone_ref, 1);

                fclones->skb2.fclone = SKB_FCLONE_CLONE;
        }
out:
        return skb;
nodata:
        kmem_cache_free(cache, skb);
        skb = NULL;
        goto out;
}
EXPORT_SYMBOL(__alloc_skb);
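
/*
 * Example usage (illustrative sketch, not part of the original file):
 * most callers go through the alloc_skb() wrapper from <linux/skbuff.h>
 * and then partition the buffer with skb_reserve()/skb_put(); the
 * payload and len below are hypothetical:
 *
 *      struct sk_buff *skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC);
 *
 *      if (!skb)
 *              return -ENOMEM;
 *      skb_reserve(skb, NET_IP_ALIGN);
 *      skb_put_data(skb, payload, len);
 */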

/* Caller must provide SKB that is memset cleared */
static struct sk_buff *__build_skb_around(struct sk_buff *skb,
                                          void *data, unsigned int frag_size)
{
        struct skb_shared_info *shinfo;
        unsigned int size = frag_size ? : ksize(data);

        size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

        /* Assumes caller memset cleared SKB */
        skb->truesize = SKB_TRUESIZE(size);
        refcount_set(&skb->users, 1);
        skb->head = data;
        skb->data = data;
        skb_reset_tail_pointer(skb);
        skb->end = skb->tail + size;
        skb->mac_header = (typeof(skb->mac_header))~0U;
        skb->transport_header = (typeof(skb->transport_header))~0U;

        /* make sure we initialize shinfo sequentially */
        shinfo = skb_shinfo(skb);
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);

        return skb;
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
        struct sk_buff *skb;

        skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
        if (unlikely(!skb))
                return NULL;

        memset(skb, 0, offsetof(struct sk_buff, tail));

        return __build_skb_around(skb, data, frag_size);
}

/* build_skb() is wrapper over __build_skb(), that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
        struct sk_buff *skb = __build_skb(data, frag_size);

        if (skb && frag_size) {
                skb->head_frag = 1;
                if (page_is_pfmemalloc(virt_to_head_page(data)))
                        skb->pfmemalloc = 1;
        }
        return skb;
}
EXPORT_SYMBOL(build_skb);
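
/*
 * Illustrative sketch (not part of the original file) of the RX-ring
 * pattern described in the __build_skb() notes above; buf_len and
 * frame_len are hypothetical driver values. The buffer comes from the
 * page-fragment allocator, so a nonzero frag_size is passed:
 *
 *      unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) +
 *                              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *      void *data = netdev_alloc_frag(truesize);
 *      struct sk_buff *skb;
 *
 *      if (!data)
 *              return NULL;
 *      ... NIC DMAs the received frame to data + NET_SKB_PAD ...
 *      skb = build_skb(data, truesize);
 *      if (unlikely(!skb)) {
 *              skb_free_frag(data);
 *              return NULL;
 *      }
 *      skb_reserve(skb, NET_SKB_PAD);
 *      skb_put(skb, frame_len);
 */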
336
ba0509b6
JDB
337/**
338 * build_skb_around - build a network buffer around provided skb
339 * @skb: sk_buff provide by caller, must be memset cleared
340 * @data: data buffer provided by caller
341 * @frag_size: size of data, or 0 if head was kmalloced
342 */
343struct sk_buff *build_skb_around(struct sk_buff *skb,
344 void *data, unsigned int frag_size)
345{
346 if (unlikely(!skb))
347 return NULL;
348
349 skb = __build_skb_around(skb, data, frag_size);
350
351 if (skb && frag_size) {
352 skb->head_frag = 1;
353 if (page_is_pfmemalloc(virt_to_head_page(data)))
354 skb->pfmemalloc = 1;
355 }
356 return skb;
357}
358EXPORT_SYMBOL(build_skb_around);
359
795bb1c0
JDB
360#define NAPI_SKB_CACHE_SIZE 64
361
362struct napi_alloc_cache {
363 struct page_frag_cache page;
e0d7924a 364 unsigned int skb_count;
795bb1c0
JDB
365 void *skb_cache[NAPI_SKB_CACHE_SIZE];
366};
367
b63ae8ca 368static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
795bb1c0 369static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
ffde7328 370
7ba7aeab 371static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
ffde7328 372{
7ba7aeab 373 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
ffde7328 374
7ba7aeab
SAS
375 return page_frag_alloc(&nc->page, fragsz, gfp_mask);
376}
377
378void *napi_alloc_frag(unsigned int fragsz)
379{
380 fragsz = SKB_DATA_ALIGN(fragsz);
381
382 return __napi_alloc_frag(fragsz, GFP_ATOMIC);
6f532612 383}
7ba7aeab 384EXPORT_SYMBOL(napi_alloc_frag);
c93bdd0e
MG
385
386/**
387 * netdev_alloc_frag - allocate a page fragment
388 * @fragsz: fragment size
389 *
390 * Allocates a frag from a page for receive buffer.
391 * Uses GFP_ATOMIC allocations.
392 */
393void *netdev_alloc_frag(unsigned int fragsz)
394{
7ba7aeab
SAS
395 struct page_frag_cache *nc;
396 void *data;
ffde7328 397
3bed3cc4 398 fragsz = SKB_DATA_ALIGN(fragsz);
7ba7aeab
SAS
399 if (in_irq() || irqs_disabled()) {
400 nc = this_cpu_ptr(&netdev_alloc_cache);
401 data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
402 } else {
403 local_bh_disable();
404 data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
405 local_bh_enable();
406 }
407 return data;
ffde7328 408}
7ba7aeab 409EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *      __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *      @dev: network device to receive on
 *      @len: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *      Allocate a new &sk_buff and assign it a usage count of one. The
 *      buffer has NET_SKB_PAD headroom built in. Users should allocate
 *      the headroom they think they need without accounting for the
 *      built in space. The built in space is used for optimisations.
 *
 *      %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
                                   gfp_t gfp_mask)
{
        struct page_frag_cache *nc;
        struct sk_buff *skb;
        bool pfmemalloc;
        void *data;

        len += NET_SKB_PAD;

        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
                        goto skb_fail;
                goto skb_success;
        }

        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);

        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;

        if (in_irq() || irqs_disabled()) {
                nc = this_cpu_ptr(&netdev_alloc_cache);
                data = page_frag_alloc(nc, len, gfp_mask);
                pfmemalloc = nc->pfmemalloc;
        } else {
                local_bh_disable();
                nc = this_cpu_ptr(&napi_alloc_cache.page);
                data = page_frag_alloc(nc, len, gfp_mask);
                pfmemalloc = nc->pfmemalloc;
                local_bh_enable();
        }

        if (unlikely(!data))
                return NULL;

        skb = __build_skb(data, len);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }

        /* use OR instead of assignment to avoid clearing of bits in mask */
        if (pfmemalloc)
                skb->pfmemalloc = 1;
        skb->head_frag = 1;

skb_success:
        skb_reserve(skb, NET_SKB_PAD);
        skb->dev = dev;

skb_fail:
        return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
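
/*
 * Example usage (illustrative sketch, not part of the original file):
 * drivers normally call the netdev_alloc_skb() wrapper and rely on the
 * built-in NET_SKB_PAD headroom; rx_buf and pkt_len are hypothetical:
 *
 *      struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);
 *
 *      if (unlikely(!skb))
 *              return NULL;
 *      skb_put_data(skb, rx_buf, pkt_len);
 *      skb->protocol = eth_type_trans(skb, dev);
 *      netif_rx(skb);
 */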

/**
 *      __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *      @napi: napi instance this buffer was allocated for
 *      @len: length to allocate
 *      @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *      Allocate a new sk_buff for use in NAPI receive. This buffer will
 *      attempt to allocate the head from a special reserved region used
 *      only for NAPI Rx allocation. By doing this we can save several
 *      CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *      %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                                 gfp_t gfp_mask)
{
        struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
        struct sk_buff *skb;
        void *data;

        len += NET_SKB_PAD + NET_IP_ALIGN;

        if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
                        goto skb_fail;
                goto skb_success;
        }

        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);

        if (sk_memalloc_socks())
                gfp_mask |= __GFP_MEMALLOC;

        data = page_frag_alloc(&nc->page, len, gfp_mask);
        if (unlikely(!data))
                return NULL;

        skb = __build_skb(data, len);
        if (unlikely(!skb)) {
                skb_free_frag(data);
                return NULL;
        }

        /* use OR instead of assignment to avoid clearing of bits in mask */
        if (nc->page.pfmemalloc)
                skb->pfmemalloc = 1;
        skb->head_frag = 1;

skb_success:
        skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
        skb->dev = napi->dev;

skb_fail:
        return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
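
/*
 * Example usage (illustrative sketch, not part of the original file),
 * from a driver's NAPI poll callback; rx_buf and pkt_len are
 * hypothetical:
 *
 *      struct sk_buff *skb = napi_alloc_skb(napi, pkt_len);
 *
 *      if (unlikely(!skb))
 *              break;
 *      skb_put_data(skb, rx_buf, pkt_len);
 *      skb->protocol = eth_type_trans(skb, napi->dev);
 *      napi_gro_receive(napi, skb);
 */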
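/* Append page fragment @page/@off/@size at frag slot @i and update the
 * accounting that a paged skb must keep consistent: skb->len and
 * skb->data_len grow by the fragment size, skb->truesize by the
 * caller-supplied @truesize. (Descriptive comment added; not in the
 * original file.)
 */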
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
                     int size, unsigned int truesize)
{
        skb_fill_page_desc(skb, i, page, off, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
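/* Like skb_add_rx_frag(), but grow the existing fragment at slot @i by
 * @size bytes instead of adding a new one, coalescing adjacent data.
 * (Descriptive comment added; not in the original file.)
 */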
void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
                          unsigned int truesize)
{
        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

        skb_frag_size_add(frag, size);
        skb->len += size;
        skb->data_len += size;
        skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
        kfree_skb_list(*listp);
        *listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
        skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
        struct sk_buff *list;

        skb_walk_frags(skb, list)
                skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
        unsigned char *head = skb->head;

        if (skb->head_frag)
                skb_free_frag(head);
        else
                kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int i;

        if (skb->cloned &&
            atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
                              &shinfo->dataref))
                return;

        for (i = 0; i < shinfo->nr_frags; i++)
                __skb_frag_unref(&shinfo->frags[i]);

        if (shinfo->frag_list)
                kfree_skb_list(shinfo->frag_list);

        skb_zcopy_clear(skb, true);
        skb_free_head(skb);
}

/*
 *      Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
        struct sk_buff_fclones *fclones;

        switch (skb->fclone) {
        case SKB_FCLONE_UNAVAILABLE:
                kmem_cache_free(skbuff_head_cache, skb);
                return;

        case SKB_FCLONE_ORIG:
                fclones = container_of(skb, struct sk_buff_fclones, skb1);

                /* We usually free the clone (TX completion) before original skb
                 * This test would have no chance to be true for the clone,
                 * while here, branch prediction will be good.
                 */
                if (refcount_read(&fclones->fclone_ref) == 1)
                        goto fastpath;
                break;

        default: /* SKB_FCLONE_CLONE */
                fclones = container_of(skb, struct sk_buff_fclones, skb2);
                break;
        }
        if (!refcount_dec_and_test(&fclones->fclone_ref))
                return;
fastpath:
        kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
        skb_dst_drop(skb);
        if (skb->destructor) {
                WARN_ON(in_irq());
                skb->destructor(skb);
        }
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        nf_conntrack_put(skb_nfct(skb));
#endif
        skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
        skb_release_head_state(skb);
        if (likely(skb->head))
                skb_release_data(skb);
}

/**
 *      __kfree_skb - private function
 *      @skb: buffer
 *
 *      Free an sk_buff. Release anything attached to the buffer.
 *      Clean the state. This is an internal helper function. Users should
 *      always call kfree_skb
 */
void __kfree_skb(struct sk_buff *skb)
{
        skb_release_all(skb);
        kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);