net/ipv6/netfilter/nf_conntrack_reasm.c
1 /*
2 * IPv6 fragment reassembly for connection tracking
3 *
4 * Copyright (C)2004 USAGI/WIDE Project
5 *
6 * Author:
7 * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
8 *
9 * Based on: net/ipv6/reassembly.c
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17 #include <linux/errno.h>
18 #include <linux/types.h>
19 #include <linux/string.h>
20 #include <linux/socket.h>
21 #include <linux/sockios.h>
22 #include <linux/jiffies.h>
23 #include <linux/net.h>
24 #include <linux/list.h>
25 #include <linux/netdevice.h>
26 #include <linux/in6.h>
27 #include <linux/ipv6.h>
28 #include <linux/icmpv6.h>
29 #include <linux/random.h>
30 #include <linux/jhash.h>
31
32 #include <net/sock.h>
33 #include <net/snmp.h>
34 #include <net/inet_frag.h>
35
36 #include <net/ipv6.h>
37 #include <net/protocol.h>
38 #include <net/transp_v6.h>
39 #include <net/rawv6.h>
40 #include <net/ndisc.h>
41 #include <net/addrconf.h>
42 #include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
43 #include <linux/sysctl.h>
44 #include <linux/netfilter.h>
45 #include <linux/netfilter_ipv6.h>
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48
49 #define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
50 #define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */
51 #define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT
52
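/* Per-skb state kept in skb->cb while a fragment sits in the queue:
 * 'offset' is the fragment's byte offset within the reassembled
 * datagram, and 'orig' points to the original skb, since this code
 * only ever queues clones (see nf_ct_frag6_gather()).  The struct
 * must fit inside skb->cb.
 */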
53 struct nf_ct_frag6_skb_cb
54 {
55 struct inet6_skb_parm h;
56 int offset;
57 struct sk_buff *orig;
58 };
59
60 #define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb*)((skb)->cb))
61
62 struct nf_ct_frag6_queue
63 {
64 struct inet_frag_queue q;
65
66 __be32 id; /* fragment id */
67 struct in6_addr saddr;
68 struct in6_addr daddr;
69
70 unsigned int csum;
71 __u16 nhoffset;
72 };
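/* One reassembly queue per (id, saddr, daddr) tuple.  The embedded
 * inet_frag_queue provides the fragment list, timer, lock and memory
 * accounting; id/saddr/daddr form the IPv6-specific lookup key, and
 * nhoffset records where the Next Header byte preceding the fragment
 * header lives so that nf_ct_frag6_reasm() can patch it.
 */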
73
74 static struct inet_frags nf_frags;
75 static struct netns_frags nf_init_frags;
76
77 #ifdef CONFIG_SYSCTL
78 struct ctl_table nf_ct_ipv6_sysctl_table[] = {
79 {
80 .procname = "nf_conntrack_frag6_timeout",
81 .data = &nf_init_frags.timeout,
82 .maxlen = sizeof(unsigned int),
83 .mode = 0644,
84 .proc_handler = &proc_dointvec_jiffies,
85 },
86 {
87 .ctl_name = NET_NF_CONNTRACK_FRAG6_LOW_THRESH,
88 .procname = "nf_conntrack_frag6_low_thresh",
89 .data = &nf_init_frags.low_thresh,
90 .maxlen = sizeof(unsigned int),
91 .mode = 0644,
92 .proc_handler = &proc_dointvec,
93 },
94 {
95 .ctl_name = NET_NF_CONNTRACK_FRAG6_HIGH_THRESH,
96 .procname = "nf_conntrack_frag6_high_thresh",
97 .data = &nf_init_frags.high_thresh,
98 .maxlen = sizeof(unsigned int),
99 .mode = 0644,
100 .proc_handler = &proc_dointvec,
101 },
102 { .ctl_name = 0 }
103 };
104 #endif
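/* This table is registered elsewhere by the IPv6 conntrack code; the
 * knobs normally appear as nf_conntrack_frag6_timeout,
 * nf_conntrack_frag6_low_thresh and nf_conntrack_frag6_high_thresh
 * under /proc/sys/net/netfilter/.  The timeout uses
 * proc_dointvec_jiffies, so it is written in seconds and stored in
 * jiffies.
 */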
105
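/* Hand-rolled jhash mixing over the 32-bit words of the source
 * address, destination address and fragment id, seeded with
 * nf_frags.rnd (periodically re-randomized by the inet_frag core),
 * then reduced to an INETFRAGS_HASHSZ-sized bucket index.
 */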
106 static unsigned int ip6qhashfn(__be32 id, const struct in6_addr *saddr,
107 const struct in6_addr *daddr)
108 {
109 u32 a, b, c;
110
111 a = (__force u32)saddr->s6_addr32[0];
112 b = (__force u32)saddr->s6_addr32[1];
113 c = (__force u32)saddr->s6_addr32[2];
114
115 a += JHASH_GOLDEN_RATIO;
116 b += JHASH_GOLDEN_RATIO;
117 c += nf_frags.rnd;
118 __jhash_mix(a, b, c);
119
120 a += (__force u32)saddr->s6_addr32[3];
121 b += (__force u32)daddr->s6_addr32[0];
122 c += (__force u32)daddr->s6_addr32[1];
123 __jhash_mix(a, b, c);
124
125 a += (__force u32)daddr->s6_addr32[2];
126 b += (__force u32)daddr->s6_addr32[3];
127 c += (__force u32)id;
128 __jhash_mix(a, b, c);
129
130 return c & (INETFRAGS_HASHSZ - 1);
131 }
132
133 static unsigned int nf_hashfn(struct inet_frag_queue *q)
134 {
135 const struct nf_ct_frag6_queue *nq;
136
137 nq = container_of(q, struct nf_ct_frag6_queue, q);
138 return ip6qhashfn(nq->id, &nq->saddr, &nq->daddr);
139 }
140
141 static void nf_skb_free(struct sk_buff *skb)
142 {
143 if (NFCT_FRAG6_CB(skb)->orig)
144 kfree_skb(NFCT_FRAG6_CB(skb)->orig);
145 }
146
147 /* Memory Tracking Functions. */
148 static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
149 {
150 if (work)
151 *work -= skb->truesize;
152 atomic_sub(skb->truesize, &nf_init_frags.mem);
153 nf_skb_free(skb);
154 kfree_skb(skb);
155 }
156
157 /* Destruction primitives. */
158
159 static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
160 {
161 inet_frag_put(&fq->q, &nf_frags);
162 }
163
164 /* Kill fq entry. It is not destroyed immediately,
165 * because the caller (and possibly others) still holds a reference.
166 */
167 static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
168 {
169 inet_frag_kill(&fq->q, &nf_frags);
170 }
171
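/* Called from nf_ct_frag6_gather() once memory use exceeds
 * high_thresh: the generic evictor drops whole queues in LRU order
 * until usage falls back below low_thresh.
 */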
172 static void nf_ct_frag6_evictor(void)
173 {
174 local_bh_disable();
175 inet_frag_evictor(&nf_init_frags, &nf_frags);
176 local_bh_enable();
177 }
178
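/* Timer callback armed by the inet_frag core: when a queue times out
 * it is simply unhashed, and its fragments are freed once the last
 * reference goes away.  Unlike net/ipv6/reassembly.c, no ICMPv6
 * "fragment reassembly time exceeded" message is sent here.
 */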
179 static void nf_ct_frag6_expire(unsigned long data)
180 {
181 struct nf_ct_frag6_queue *fq;
182
183 fq = container_of((struct inet_frag_queue *)data,
184 struct nf_ct_frag6_queue, q);
185
186 spin_lock(&fq->q.lock);
187
188 if (fq->q.last_in & INET_FRAG_COMPLETE)
189 goto out;
190
191 fq_kill(fq);
192
193 out:
194 spin_unlock(&fq->q.lock);
195 fq_put(fq);
196 }
197
198 /* Creation primitives. */
199
200 static __inline__ struct nf_ct_frag6_queue *
201 fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
202 {
203 struct inet_frag_queue *q;
204 struct ip6_create_arg arg;
205 unsigned int hash;
206
207 arg.id = id;
208 arg.src = src;
209 arg.dst = dst;
210
211 read_lock_bh(&nf_frags.lock);
212 hash = ip6qhashfn(id, src, dst);
213
214 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
215 local_bh_enable();
216 if (q == NULL)
217 goto oom;
218
219 return container_of(q, struct nf_ct_frag6_queue, q);
220
221 oom:
222 pr_debug("Can't alloc new queue\n");
223 return NULL;
224 }
225
226
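/* Insert one fragment into its reassembly queue.  frag_off carries a
 * 13-bit fragment offset in 8-octet units plus the M (more fragments)
 * flag in the low bit, so
 *     offset = ntohs(fhdr->frag_off) & ~0x7
 * already yields the offset in bytes.  For example, a frag_off of
 * htons(0x00b9) means offset 184 bytes with more fragments to come.
 * The fragment is trimmed against overlapping neighbours, linked into
 * the offset-sorted list and accounted in fq->q.meat; returns 0 on
 * success and -1 if the fragment is malformed or adds nothing new.
 */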
227 static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
228 const struct frag_hdr *fhdr, int nhoff)
229 {
230 struct sk_buff *prev, *next;
231 int offset, end;
232
233 if (fq->q.last_in & INET_FRAG_COMPLETE) {
234 pr_debug("Allready completed\n");
235 goto err;
236 }
237
238 offset = ntohs(fhdr->frag_off) & ~0x7;
239 end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
240 ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
241
242 if ((unsigned int)end > IPV6_MAXPLEN) {
243 pr_debug("offset is too large.\n");
244 return -1;
245 }
246
247 if (skb->ip_summed == CHECKSUM_COMPLETE) {
248 const unsigned char *nh = skb_network_header(skb);
249 skb->csum = csum_sub(skb->csum,
250 csum_partial(nh, (u8 *)(fhdr + 1) - nh,
251 0));
252 }
253
254 /* Is this the final fragment? */
255 if (!(fhdr->frag_off & htons(IP6_MF))) {
256 /* If we already have some bits beyond end
257 * or have different end, the segment is corrupted.
258 */
259 if (end < fq->q.len ||
260 ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len)) {
261 pr_debug("already received last fragment\n");
262 goto err;
263 }
264 fq->q.last_in |= INET_FRAG_LAST_IN;
265 fq->q.len = end;
266 } else {
267 /* Check that the fragment's length is a multiple of 8 bytes,
268 * as the RFC requires for all but the last fragment.
269 */
270 if (end & 0x7) {
271 /* RFC2460 says always send parameter problem in
272 * this case. -DaveM
273 */
274 pr_debug("end of fragment not rounded to 8 bytes.\n");
275 return -1;
276 }
277 if (end > fq->q.len) {
278 /* Some bits beyond end -> corruption. */
279 if (fq->q.last_in & INET_FRAG_LAST_IN) {
280 pr_debug("last packet already reached.\n");
281 goto err;
282 }
283 fq->q.len = end;
284 }
285 }
286
287 if (end == offset)
288 goto err;
289
290 /* Point into the IP datagram 'data' part. */
291 if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
292 pr_debug("queue: message is too short.\n");
293 goto err;
294 }
295 if (pskb_trim_rcsum(skb, end - offset)) {
296 pr_debug("Can't trim\n");
297 goto err;
298 }
299
300 /* Find out which fragments are in front and at the back of us
301 * in the chain of fragments so far. We must know where to put
302 * this fragment, right?
303 */
304 prev = NULL;
305 for (next = fq->q.fragments; next != NULL; next = next->next) {
306 if (NFCT_FRAG6_CB(next)->offset >= offset)
307 break; /* bingo! */
308 prev = next;
309 }
310
311 /* We found where to put this one. Check for overlap with
312 * preceding fragment, and, if needed, align things so that
313 * any overlaps are eliminated.
314 */
315 if (prev) {
316 int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;
317
318 if (i > 0) {
319 offset += i;
320 if (end <= offset) {
321 pr_debug("overlap\n");
322 goto err;
323 }
324 if (!pskb_pull(skb, i)) {
325 pr_debug("Can't pull\n");
326 goto err;
327 }
328 if (skb->ip_summed != CHECKSUM_UNNECESSARY)
329 skb->ip_summed = CHECKSUM_NONE;
330 }
331 }
332
333 /* Look for overlap with succeeding segments.
334 * If we can merge fragments, do it.
335 */
336 while (next && NFCT_FRAG6_CB(next)->offset < end) {
337 /* overlap is 'i' bytes */
338 int i = end - NFCT_FRAG6_CB(next)->offset;
339
340 if (i < next->len) {
341 /* Eat head of the next overlapped fragment
342 * and leave the loop. The next ones cannot overlap.
343 */
344 pr_debug("Eat head of the overlapped parts.: %d", i);
345 if (!pskb_pull(next, i))
346 goto err;
347
348 /* next fragment */
349 NFCT_FRAG6_CB(next)->offset += i;
350 fq->q.meat -= i;
351 if (next->ip_summed != CHECKSUM_UNNECESSARY)
352 next->ip_summed = CHECKSUM_NONE;
353 break;
354 } else {
355 struct sk_buff *free_it = next;
356
357 /* Old fragment is completely covered by the
358 * new one, drop it.
359 */
360 next = next->next;
361
362 if (prev)
363 prev->next = next;
364 else
365 fq->q.fragments = next;
366
367 fq->q.meat -= free_it->len;
368 frag_kfree_skb(free_it, NULL);
369 }
370 }
371
372 NFCT_FRAG6_CB(skb)->offset = offset;
373
374 /* Insert this fragment in the chain of fragments. */
375 skb->next = next;
376 if (prev)
377 prev->next = skb;
378 else
379 fq->q.fragments = skb;
380
381 skb->dev = NULL;
382 fq->q.stamp = skb->tstamp;
383 fq->q.meat += skb->len;
384 atomic_add(skb->truesize, &nf_init_frags.mem);
385
386 /* The first fragment.
387 * nhoffset is obtained from the first fragment, of course.
388 */
389 if (offset == 0) {
390 fq->nhoffset = nhoff;
391 fq->q.last_in |= INET_FRAG_FIRST_IN;
392 }
393 write_lock(&nf_frags.lock);
394 list_move_tail(&fq->q.lru_list, &nf_init_frags.lru_list);
395 write_unlock(&nf_frags.lock);
396 return 0;
397
398 err:
399 return -1;
400 }
401
402 /*
403 * Check whether this datagram is complete and reassemble it.
404 * Returns NULL on failure for any reason, otherwise a pointer
405 * to the reassembled skb.
406 *
407 * It is called with the fq lock held; the caller must have checked
408 * that the queue is eligible for reassembly, i.e. it is not COMPLETE,
409 * both the first and the last fragments arrived and all the bits are here.
410 */
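/* The head skb ends up carrying the whole datagram: the remaining
 * fragments are hung off skb_shinfo(head)->frag_list, the saved Next
 * Header byte at fq->nhoffset is patched, the fragment header is
 * removed by sliding the preceding headers forward by
 * sizeof(struct frag_hdr), and payload_len is rewritten to the
 * reassembled length.
 */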
411 static struct sk_buff *
412 nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
413 {
414 struct sk_buff *fp, *op, *head = fq->q.fragments;
415 int payload_len;
416
417 fq_kill(fq);
418
419 WARN_ON(head == NULL);
420 WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
421
422 /* Unfragmented part is taken from the first segment. */
423 payload_len = ((head->data - skb_network_header(head)) -
424 sizeof(struct ipv6hdr) + fq->q.len -
425 sizeof(struct frag_hdr));
426 if (payload_len > IPV6_MAXPLEN) {
427 pr_debug("payload len is too large.\n");
428 goto out_oversize;
429 }
430
431 /* Head of list must not be cloned. */
432 if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
433 pr_debug("skb is cloned but can't expand head");
434 goto out_oom;
435 }
436
437 /* If the first fragment is fragmented itself, we split
438 * it into two chunks: the first with the data and paged part
439 * and the second holding only the fragment list. */
440 if (skb_shinfo(head)->frag_list) {
441 struct sk_buff *clone;
442 int i, plen = 0;
443
444 if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
445 pr_debug("Can't alloc skb\n");
446 goto out_oom;
447 }
448 clone->next = head->next;
449 head->next = clone;
450 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
451 skb_shinfo(head)->frag_list = NULL;
452 for (i=0; i<skb_shinfo(head)->nr_frags; i++)
453 plen += skb_shinfo(head)->frags[i].size;
454 clone->len = clone->data_len = head->data_len - plen;
455 head->data_len -= clone->len;
456 head->len -= clone->len;
457 clone->csum = 0;
458 clone->ip_summed = head->ip_summed;
459
460 NFCT_FRAG6_CB(clone)->orig = NULL;
461 atomic_add(clone->truesize, &nf_init_frags.mem);
462 }
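/* The split above is needed because skb_shinfo(head)->frag_list is
 * about to be reused below to chain the other fragments, so any
 * frag_list the first fragment already owned has to be moved onto a
 * separate skb first.
 */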
463
464 /* We have to remove the fragment header from the datagram and
465 * relocate the network header in order to calculate the ICV correctly. */
466 skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0];
467 memmove(head->head + sizeof(struct frag_hdr), head->head,
468 (head->data - head->head) - sizeof(struct frag_hdr));
469 head->mac_header += sizeof(struct frag_hdr);
470 head->network_header += sizeof(struct frag_hdr);
471
472 skb_shinfo(head)->frag_list = head->next;
473 skb_reset_transport_header(head);
474 skb_push(head, head->data - skb_network_header(head));
475 atomic_sub(head->truesize, &nf_init_frags.mem);
476
477 for (fp=head->next; fp; fp = fp->next) {
478 head->data_len += fp->len;
479 head->len += fp->len;
480 if (head->ip_summed != fp->ip_summed)
481 head->ip_summed = CHECKSUM_NONE;
482 else if (head->ip_summed == CHECKSUM_COMPLETE)
483 head->csum = csum_add(head->csum, fp->csum);
484 head->truesize += fp->truesize;
485 atomic_sub(fp->truesize, &nf_init_frags.mem);
486 }
487
488 head->next = NULL;
489 head->dev = dev;
490 head->tstamp = fq->q.stamp;
491 ipv6_hdr(head)->payload_len = htons(payload_len);
492
493 /* Yes, and fold redundant checksum back. 8) */
494 if (head->ip_summed == CHECKSUM_COMPLETE)
495 head->csum = csum_partial(skb_network_header(head),
496 skb_network_header_len(head),
497 head->csum);
498
499 fq->q.fragments = NULL;
500
501 /* all the original skbs are chained together starting at NFCT_FRAG6_CB(head)->orig */
502 fp = skb_shinfo(head)->frag_list;
503 if (NFCT_FRAG6_CB(fp)->orig == NULL)
504 /* the code above split the head skb into two skbs */
505 fp = fp->next;
506
507 op = NFCT_FRAG6_CB(head)->orig;
508 for (; fp; fp = fp->next) {
509 struct sk_buff *orig = NFCT_FRAG6_CB(fp)->orig;
510
511 op->next = orig;
512 op = orig;
513 NFCT_FRAG6_CB(fp)->orig = NULL;
514 }
515
516 return head;
517
518 out_oversize:
519 if (net_ratelimit())
520 printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len);
521 goto out_fail;
522 out_oom:
523 if (net_ratelimit())
524 printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
525 out_fail:
526 return NULL;
527 }
528
529 /*
530 * find the header just before Fragment Header.
531 *
532 * On success returns 0 and sets:
533 * (*prevhdrp): the protocol number of the header located just
534 * before the Fragment Header.
535 * (*prevhoff): the offset of the "Next Header" field in the header
536 * just before the Fragment Header.
537 * (*fhoff) : the offset of the Fragment Header.
538 *
539 * Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
540 *
541 */
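/* For example, assuming skb_network_offset() is 0 and a packet laid
 * out as  IPv6 | Hop-by-Hop | Fragment | TCP,  this returns 0 with
 *     *prevhdrp = NEXTHDR_HOP      (type of the header before Fragment)
 *     *prevhoff = 40               (its Next Header byte, right after
 *                                   the fixed IPv6 header)
 *     *fhoff    = 40 + hbh length  (start of the Fragment header)
 */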
542 static int
543 find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
544 {
545 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
546 const int netoff = skb_network_offset(skb);
547 u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr);
548 int start = netoff + sizeof(struct ipv6hdr);
549 int len = skb->len - start;
550 u8 prevhdr = NEXTHDR_IPV6;
551
552 while (nexthdr != NEXTHDR_FRAGMENT) {
553 struct ipv6_opt_hdr hdr;
554 int hdrlen;
555
556 if (!ipv6_ext_hdr(nexthdr)) {
557 return -1;
558 }
559 if (len < (int)sizeof(struct ipv6_opt_hdr)) {
560 pr_debug("too short\n");
561 return -1;
562 }
563 if (nexthdr == NEXTHDR_NONE) {
564 pr_debug("next header is none\n");
565 return -1;
566 }
567 if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
568 BUG();
569 if (nexthdr == NEXTHDR_AUTH)
570 hdrlen = (hdr.hdrlen+2)<<2;
571 else
572 hdrlen = ipv6_optlen(&hdr);
573
574 prevhdr = nexthdr;
575 prev_nhoff = start;
576
577 nexthdr = hdr.nexthdr;
578 len -= hdrlen;
579 start += hdrlen;
580 }
581
582 if (len < 0)
583 return -1;
584
585 *prevhdrp = prevhdr;
586 *prevhoff = prev_nhoff;
587 *fhoff = start;
588
589 return 0;
590 }
591
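/* Entry point for defragmentation.  The skb is cloned and the clone is
 * queued, with NFCT_FRAG6_CB(clone)->orig remembering the original.
 * Returns the original skb untouched if it is not a fragment (or the
 * clone cannot be queued), NULL if the fragment was queued but the
 * datagram is not yet complete (or reassembly failed), or the
 * reassembled skb once every fragment has arrived.
 */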
592 struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
593 {
594 struct sk_buff *clone;
595 struct net_device *dev = skb->dev;
596 struct frag_hdr *fhdr;
597 struct nf_ct_frag6_queue *fq;
598 struct ipv6hdr *hdr;
599 int fhoff, nhoff;
600 u8 prevhdr;
601 struct sk_buff *ret_skb = NULL;
602
603 /* Jumbo payload inhibits frag. header */
604 if (ipv6_hdr(skb)->payload_len == 0) {
605 pr_debug("payload len = 0\n");
606 return skb;
607 }
608
609 if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
610 return skb;
611
612 clone = skb_clone(skb, GFP_ATOMIC);
613 if (clone == NULL) {
614 pr_debug("Can't clone skb\n");
615 return skb;
616 }
617
618 NFCT_FRAG6_CB(clone)->orig = skb;
619
620 if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) {
621 pr_debug("message is too short.\n");
622 goto ret_orig;
623 }
624
625 skb_set_transport_header(clone, fhoff);
626 hdr = ipv6_hdr(clone);
627 fhdr = (struct frag_hdr *)skb_transport_header(clone);
628
629 if (!(fhdr->frag_off & htons(0xFFF9))) {
630 pr_debug("Invalid fragment offset\n");
631 /* It is not a fragmented frame */
632 goto ret_orig;
633 }
634
635 if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh)
636 nf_ct_frag6_evictor();
637
638 fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
639 if (fq == NULL) {
640 pr_debug("Can't find and can't create new queue\n");
641 goto ret_orig;
642 }
643
644 spin_lock_bh(&fq->q.lock);
645
646 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
647 spin_unlock_bh(&fq->q.lock);
648 pr_debug("Can't insert skb to queue\n");
649 fq_put(fq);
650 goto ret_orig;
651 }
652
653 if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
654 fq->q.meat == fq->q.len) {
655 ret_skb = nf_ct_frag6_reasm(fq, dev);
656 if (ret_skb == NULL)
657 pr_debug("Can't reassemble fragmented packets\n");
658 }
659 spin_unlock_bh(&fq->q.lock);
660
661 fq_put(fq);
662 return ret_skb;
663
664 ret_orig:
665 kfree_skb(clone);
666 return skb;
667 }
668
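/* Re-inject the original fragments after reassembly: each skb on the
 * ->orig chain gets its nfct_reasm pointer set to the reassembled skb
 * and is then passed back into the IPv6 hooks just after the defrag
 * hook's priority, so conntrack sees each fragment with the full
 * datagram attached.
 */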
669 void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
670 struct net_device *in, struct net_device *out,
671 int (*okfn)(struct sk_buff *))
672 {
673 struct sk_buff *s, *s2;
674
675 for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
676 nf_conntrack_put_reasm(s->nfct_reasm);
677 nf_conntrack_get_reasm(skb);
678 s->nfct_reasm = skb;
679
680 s2 = s->next;
681 s->next = NULL;
682
683 NF_HOOK_THRESH(PF_INET6, hooknum, s, in, out, okfn,
684 NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
685 s = s2;
686 }
687 nf_conntrack_put_reasm(skb);
688 }
689
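/* Register this queue type with the generic inet_frag infrastructure.
 * Defaults: 60 second timeout (IPV6_FRAG_TIMEOUT), 256 KB high and
 * 192 KB low memory thresholds, and hash secret rekeying every ten
 * minutes.
 */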
690 int nf_ct_frag6_init(void)
691 {
692 nf_frags.hashfn = nf_hashfn;
693 nf_frags.constructor = ip6_frag_init;
694 nf_frags.destructor = NULL;
695 nf_frags.skb_free = nf_skb_free;
696 nf_frags.qsize = sizeof(struct nf_ct_frag6_queue);
697 nf_frags.match = ip6_frag_match;
698 nf_frags.frag_expire = nf_ct_frag6_expire;
699 nf_frags.secret_interval = 10 * 60 * HZ;
700 nf_init_frags.timeout = IPV6_FRAG_TIMEOUT;
701 nf_init_frags.high_thresh = 256 * 1024;
702 nf_init_frags.low_thresh = 192 * 1024;
703 inet_frags_init_net(&nf_init_frags);
704 inet_frags_init(&nf_frags);
705
706 return 0;
707 }
708
709 void nf_ct_frag6_cleanup(void)
710 {
711 inet_frags_fini(&nf_frags);
712
713 nf_init_frags.low_thresh = 0;
714 nf_ct_frag6_evictor();
715 }