// SPDX-License-Identifier: GPL-2.0
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The IP fragmentation functionality.
 *
 * Authors:     Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *              Alan Cox        :       Split from ip.c , see ip_input.c for history.
 *              David S. Miller :       Begin massive cleanup...
 *              Andi Kleen      :       Add sysctls.
 *              xxxx            :       Overlapfrag bug.
 *              Ultima          :       ip_expire() kernel panic.
 *              Bill Hawes      :       Frag accounting and evictor fixes.
 *              John McDonald   :       0 length frag bug.
 *              Alexey Kuznetsov:       SMP races, threading, cleanup.
 *              Patrick McHardy :       LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";

struct ipfrag_skb_cb
{
        struct inet_skb_parm    h;
        int                     offset;
};

#define FRAG_CB(skb)    ((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
        struct inet_frag_queue q;

        u8              ecn;            /* RFC3168 support */
        u16             max_df_size;    /* largest frag with DF set seen */
        int             iif;
        unsigned int    rid;
        struct inet_peer *peer;
};

static u8 ip4_frag_ecn(u8 tos)
{
        return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;
1da177e4 85
1706d587
HX
86static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
87 struct net_device *dev);
88
abd6523d 89
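/* Constructor for a new reassembly queue, invoked through ip4_frags.constructor
 * when the inet_frag core has to allocate one: record the lookup key, clear the
 * accumulated ECN bits and, when ipfrag_max_dist is non-zero, take a reference
 * on the inet_peer entry for the source address so that ip_frag_too_far() can
 * use its fragment counter.
 */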
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
        struct ipq *qp = container_of(q, struct ipq, q);
        struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
                                               frags);
        struct net *net = container_of(ipv4, struct net, ipv4);

        const struct frag_v4_compare_key *key = a;

        q->key.v4 = *key;
        qp->ecn = 0;
        qp->peer = q->net->max_dist ?
                inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
                NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
        struct ipq *qp;

        qp = container_of(q, struct ipq, q);
        if (qp->peer)
                inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
        inet_frag_put(&ipq->q);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
        inet_frag_kill(&ipq->q);
}

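/* Fragments queued on behalf of AF_PACKET sockets or netfilter conntrack are
 * typically being forwarded rather than locally terminated; for those users
 * ip_expire() below only emits the ICMP "fragment reassembly time exceeded"
 * message when the destination turns out to be local (RTN_LOCAL), per RFC 792.
 */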
static bool frag_expire_skip_icmp(u32 user)
{
        return user == IP_DEFRAG_AF_PACKET ||
               ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
                                         __IP_DEFRAG_CONNTRACK_IN_END) ||
               ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
                                         __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
        struct inet_frag_queue *frag = from_timer(frag, t, timer);
        struct sk_buff *clone, *head;
        const struct iphdr *iph;
        struct net *net;
        struct ipq *qp;
        int err;

        qp = container_of(frag, struct ipq, q);
        net = container_of(qp->q.net, struct net, ipv4.frags);

        rcu_read_lock();
        spin_lock(&qp->q.lock);

        if (qp->q.flags & INET_FRAG_COMPLETE)
                goto out;

        ipq_kill(qp);
        __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);

        head = qp->q.fragments;

        __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

        if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !head)
                goto out;

        head->dev = dev_get_by_index_rcu(net, qp->iif);
        if (!head->dev)
                goto out;

        /* skb has no dst, perform route lookup again */
        iph = ip_hdr(head);
        err = ip_route_input_noref(head, iph->daddr, iph->saddr,
                                   iph->tos, head->dev);
        if (err)
                goto out;

        /* Only an end host needs to send an ICMP
         * "Fragment Reassembly Timeout" message, per RFC792.
         */
        if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
            (skb_rtable(head)->rt_type != RTN_LOCAL))
                goto out;

        clone = skb_clone(head, GFP_ATOMIC);

        /* Send an ICMP "Fragment Reassembly Timeout" message. */
        if (clone) {
                spin_unlock(&qp->q.lock);
                icmp_send(clone, ICMP_TIME_EXCEEDED,
                          ICMP_EXC_FRAGTIME, 0);
                consume_skb(clone);
                goto out_rcu_unlock;
        }
out:
        spin_unlock(&qp->q.lock);
out_rcu_unlock:
        rcu_read_unlock();
        ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
                           u32 user, int vif)
{
        struct frag_v4_compare_key key = {
                .saddr = iph->saddr,
                .daddr = iph->daddr,
                .user = user,
                .vif = vif,
                .id = iph->id,
                .protocol = iph->protocol,
        };
        struct inet_frag_queue *q;

        q = inet_frag_find(&net->ipv4.frags, &key);
        if (!q)
                return NULL;

        return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
        struct inet_peer *peer = qp->peer;
        unsigned int max = qp->q.net->max_dist;
        unsigned int start, end;

        int rc;

        if (!peer || !max)
                return 0;

        start = qp->rid;
        end = atomic_inc_return(&peer->rid);
        qp->rid = end;

        rc = qp->q.fragments && (end - start) > max;

        if (rc) {
                struct net *net;

                net = container_of(qp->q.net, struct net, ipv4.frags);
                __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
        }

        return rc;
}

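/* Flush every fragment queued so far and reset the queue to an empty state.
 * Used when ip_frag_too_far() decides the peer has interleaved too many other
 * fragments (ipfrag_max_dist) and this queue is better restarted from scratch.
 * Returns -ETIMEDOUT if the expire timer had already fired, in which case the
 * caller kills the queue instead of reusing it.
 */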
static int ip_frag_reinit(struct ipq *qp)
{
        struct sk_buff *fp;
        unsigned int sum_truesize = 0;

        if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
                refcount_inc(&qp->q.refcnt);
                return -ETIMEDOUT;
        }

        fp = qp->q.fragments;
        do {
                struct sk_buff *xp = fp->next;

                sum_truesize += fp->truesize;
                kfree_skb(fp);
                fp = xp;
        } while (fp);
        sub_frag_mem_limit(qp->q.net, sum_truesize);

        qp->q.flags = 0;
        qp->q.len = 0;
        qp->q.meat = 0;
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
        qp->iif = 0;
        qp->ecn = 0;

        return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        unsigned int fragsize;
        int flags, offset;
        int ihl, end;
        int err = -ENOENT;
        u8 ecn;

        if (qp->q.flags & INET_FRAG_COMPLETE)
                goto err;

        if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
            unlikely(ip_frag_too_far(qp)) &&
            unlikely(err = ip_frag_reinit(qp))) {
                ipq_kill(qp);
                goto err;
        }

        ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
        offset = ntohs(ip_hdr(skb)->frag_off);
        flags = offset & ~IP_OFFSET;
        offset &= IP_OFFSET;
        offset <<= 3;           /* offset is in 8-byte chunks */
        ihl = ip_hdrlen(skb);

        /* Determine the position of this fragment. */
        end = offset + skb->len - skb_network_offset(skb) - ihl;
        err = -EINVAL;

        /* Is this the final fragment? */
        if ((flags & IP_MF) == 0) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < qp->q.len ||
                    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
                        goto err;
                qp->q.flags |= INET_FRAG_LAST_IN;
                qp->q.len = end;
        } else {
                if (end & 7) {
                        end &= ~7;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
                if (end > qp->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (qp->q.flags & INET_FRAG_LAST_IN)
                                goto err;
                        qp->q.len = end;
                }
        }
        if (end == offset)
                goto err;

        err = -ENOMEM;
        if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
                goto err;

        err = pskb_trim_rcsum(skb, end - offset);
        if (err)
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = qp->q.fragments_tail;
        if (!prev || FRAG_CB(prev)->offset < offset) {
                next = NULL;
                goto found;
        }
        prev = NULL;
        for (next = qp->q.fragments; next != NULL; next = next->next) {
                if (FRAG_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

found:
        /* We found where to put this one.  Check for overlap with
         * preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        err = -EINVAL;
                        if (end <= offset)
                                goto err;
                        err = -ENOMEM;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }

        err = -ENOMEM;

        while (next && FRAG_CB(next)->offset < end) {
                int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        int delta = -next->truesize;

                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        delta += next->truesize;
                        if (delta)
                                add_frag_mem_limit(qp->q.net, delta);
                        FRAG_CB(next)->offset += i;
                        qp->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* Old fragment is completely overridden by the
                         * new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                qp->q.fragments = next;

                        qp->q.meat -= free_it->len;
                        sub_frag_mem_limit(qp->q.net, free_it->truesize);
                        kfree_skb(free_it);
                }
        }

        FRAG_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (!next)
                qp->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
                qp->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                qp->iif = dev->ifindex;
                skb->dev = NULL;
        }
        qp->q.stamp = skb->tstamp;
        qp->q.meat += skb->len;
        qp->ecn |= ecn;
        add_frag_mem_limit(qp->q.net, skb->truesize);
        if (offset == 0)
                qp->q.flags |= INET_FRAG_FIRST_IN;

        fragsize = skb->len + ihl;

        if (fragsize > qp->q.max_size)
                qp->q.max_size = fragsize;

        if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
            fragsize > qp->max_df_size)
                qp->max_df_size = fragsize;

        if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            qp->q.meat == qp->q.len) {
                unsigned long orefdst = skb->_skb_refdst;

                skb->_skb_refdst = 0UL;
                err = ip_frag_reasm(qp, prev, dev);
                skb->_skb_refdst = orefdst;
                return err;
        }

        skb_dst_drop(skb);
        return -EINPROGRESS;

err:
        kfree_skb(skb);
        return err;
}

/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
                         struct net_device *dev)
{
        struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
        struct iphdr *iph;
        struct sk_buff *fp, *head = qp->q.fragments;
        int len;
        int ihlen;
        int err;
        u8 ecn;

        ipq_kill(qp);

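        /* qp->ecn holds one bit per ECN codepoint seen across the fragments
         * (set in ip4_frag_ecn()); ip_frag_ecn_table folds that set into the
         * codepoint for the reassembled header, or 0xff when the mix is
         * invalid, in which case the whole datagram is dropped.
         */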
        ecn = ip_frag_ecn_table[qp->ecn];
        if (unlikely(ecn == 0xff)) {
                err = -EINVAL;
                goto out_fail;
        }
        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);
                if (!fp)
                        goto out_nomem;

                fp->next = head->next;
                if (!fp->next)
                        qp->q.fragments_tail = fp;
                prev->next = fp;

                skb_morph(head, qp->q.fragments);
                head->next = qp->q.fragments->next;

                consume_skb(qp->q.fragments);
                qp->q.fragments = head;
        }

        WARN_ON(!head);
        WARN_ON(FRAG_CB(head)->offset != 0);

        /* Allocate a new buffer for the datagram. */
        ihlen = ip_hdrlen(head);
        len = ihlen + qp->q.len;

        err = -E2BIG;
        if (len > 65535)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_nomem;

        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                clone = alloc_skb(0, GFP_ATOMIC);
                if (!clone)
                        goto out_nomem;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                add_frag_mem_limit(qp->q.net, clone->truesize);
        }

        skb_shinfo(head)->frag_list = head->next;
        skb_push(head, head->data - skb_network_header(head));

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
        }
        sub_frag_mem_limit(qp->q.net, head->truesize);

        head->next = NULL;
        head->dev = dev;
        head->tstamp = qp->q.stamp;
        IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

        iph = ip_hdr(head);
        iph->tot_len = htons(len);
        iph->tos |= ecn;

        /* When we set IP_DF on a refragmented skb we must also force a
         * call to ip_fragment to avoid forwarding a DF-skb of size s while
         * original sender only sent fragments of size f (where f < s).
         *
         * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
         * frag seen to avoid sending tiny DF-fragments in case skb was built
         * from one very small df-fragment and one large non-df frag.
         */
        if (qp->max_df_size == qp->q.max_size) {
                IPCB(head)->flags |= IPSKB_FRAG_PMTU;
                iph->frag_off = htons(IP_DF);
        } else {
                iph->frag_off = 0;
        }

        ip_send_check(iph);

        __IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
        qp->q.fragments = NULL;
        qp->q.fragments_tail = NULL;
        return 0;

out_nomem:
        net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
        err = -ENOMEM;
        goto out_fail;
out_oversize:
        net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
        __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
        return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
        struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
        int vif = l3mdev_master_ifindex_rcu(dev);
        struct ipq *qp;

        __IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
        skb_orphan(skb);

        /* Lookup (or create) queue header */
        qp = ip_find(net, ip_hdr(skb), user, vif);
        if (qp) {
                int ret;

                spin_lock(&qp->q.lock);

                ret = ip_frag_queue(qp, skb);

                spin_unlock(&qp->q.lock);
                ipq_put(qp);
                return ret;
        }

        __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
        struct iphdr iph;
        int netoff;
        u32 len;

        if (skb->protocol != htons(ETH_P_IP))
                return skb;

        netoff = skb_network_offset(skb);

        if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
                return skb;

        if (iph.ihl < 5 || iph.version != 4)
                return skb;

        len = ntohs(iph.tot_len);
        if (skb->len < netoff + len || len < (iph.ihl * 4))
                return skb;

        if (ip_is_fragment(&iph)) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (skb) {
                        if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
                                return skb;
                        if (pskb_trim_rcsum(skb, netoff + len))
                                return skb;
                        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                        if (ip_defrag(net, skb, user))
                                return NULL;
                        skb_clear_hash(skb);
                }
        }
        return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

#ifdef CONFIG_SYSCTL
static long zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
        {
                .procname       = "ipfrag_high_thresh",
                .data           = &init_net.ipv4.frags.high_thresh,
                .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
                .extra1         = &init_net.ipv4.frags.low_thresh
        },
        {
                .procname       = "ipfrag_low_thresh",
                .data           = &init_net.ipv4.frags.low_thresh,
                .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
                .extra1         = &zero,
                .extra2         = &init_net.ipv4.frags.high_thresh
        },
        {
                .procname       = "ipfrag_time",
                .data           = &init_net.ipv4.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "ipfrag_max_dist",
                .data           = &init_net.ipv4.frags.max_dist,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &zero
        },
        { }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
        {
                .procname       = "ipfrag_secret_interval",
                .data           = &ip4_frags_secret_interval_unused,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = ip4_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
                if (!table)
                        goto err_alloc;

                table[0].data = &net->ipv4.frags.high_thresh;
                table[0].extra1 = &net->ipv4.frags.low_thresh;
                table[0].extra2 = &init_net.ipv4.frags.high_thresh;
                table[1].data = &net->ipv4.frags.low_thresh;
                table[1].extra2 = &net->ipv4.frags.high_thresh;
                table[2].data = &net->ipv4.frags.timeout;
                table[3].data = &net->ipv4.frags.max_dist;
        }

        hdr = register_net_sysctl(net, "net/ipv4", table);
        if (!hdr)
                goto err_reg;

        net->ipv4.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ipv4.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv4.frags_hdr);
        kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
        register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
        return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif

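/* Per-namespace init: install the default thresholds and timeout, point the
 * namespace at the shared ip4_frags descriptor and register its state with
 * the inet_frag core and sysctl.
 */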
static int __net_init ipv4_frags_init_net(struct net *net)
{
        int res;

        /*
         * Fragment cache limits. We will commit 256K at one time. Should we
         * cross that limit we will prune down to 192K. This should cope with
         * even the most extreme cases without allowing an attacker to
         * measurably harm machine performance.
         */
        net->ipv4.frags.high_thresh = 256 * 1024;
        net->ipv4.frags.low_thresh  = 192 * 1024;
        /*
         * Important NOTE! Fragment queue must be destroyed before MSL expires.
         * RFC 791 is wrong in proposing to prolong the timer by the TTL on
         * each fragment arrival.
         */
        net->ipv4.frags.timeout = IP_FRAG_TIME;

        net->ipv4.frags.max_dist = 64;
        net->ipv4.frags.f = &ip4_frags;

        res = inet_frags_init_net(&net->ipv4.frags);
        if (res < 0)
                return res;
        res = ip4_frags_ns_ctl_register(net);
        if (res < 0)
                inet_frags_exit_net(&net->ipv4.frags);
        return res;
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
        ip4_frags_ns_ctl_unregister(net);
        inet_frags_exit_net(&net->ipv4.frags);
}

static struct pernet_operations ip4_frags_ops = {
        .init = ipv4_frags_init_net,
        .exit = ipv4_frags_exit_net,
};

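/* Reassembly queues live in a per-namespace rhashtable keyed by
 * struct frag_v4_compare_key (saddr, daddr, id, vif, user, protocol). Both
 * hash functions cover the whole key so that lookups by key and inserts by
 * object hash identically.
 */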
static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
        return jhash2(data,
                      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
        const struct inet_frag_queue *fq = data;

        return jhash2((const u32 *)&fq->key.v4,
                      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
        const struct frag_v4_compare_key *key = arg->key;
        const struct inet_frag_queue *fq = ptr;

        return !!memcmp(&fq->key, key, sizeof(*key));
}

static const struct rhashtable_params ip4_rhash_params = {
        .head_offset            = offsetof(struct inet_frag_queue, node),
        .key_offset             = offsetof(struct inet_frag_queue, key),
        .key_len                = sizeof(struct frag_v4_compare_key),
        .hashfn                 = ip4_key_hashfn,
        .obj_hashfn             = ip4_obj_hashfn,
        .obj_cmpfn              = ip4_obj_cmpfn,
        .automatic_shrinking    = true,
};

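/* Module init: hook the IPv4 callbacks and rhashtable parameters into the
 * shared inet_frag machinery, then register the sysctls and the per-namespace
 * operations.
 */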
void __init ipfrag_init(void)
{
        ip4_frags.constructor = ip4_frag_init;
        ip4_frags.destructor = ip4_frag_free;
        ip4_frags.qsize = sizeof(struct ipq);
        ip4_frags.frag_expire = ip_expire;
        ip4_frags.frags_cache_name = ip_frag_cache_name;
        ip4_frags.rhash_params = ip4_rhash_params;
        if (inet_frags_init(&ip4_frags))
                panic("IP: failed to allocate ip4_frags cache\n");
        ip4_frags_ctl_register();
        register_pernet_subsys(&ip4_frags_ops);
}