// SPDX-License-Identifier: GPL-2.0
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */
static const char ip_frag_cache_name[] = "ip4-frags";

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	struct inet_skb_parm	h;
	struct sk_buff		*next_frag;
	int			frag_run_len;
};

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))

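/* Added illustration (not part of the original source): suppose
 * fragments arrive covering bytes [0,1200), [1200,2400), then
 * [3000,3400). The first two coalesce into one run whose head has
 * frag_run_len == 2400 and whose tail is reached via next_frag; the
 * third starts a new run and a new rb-tree node. In-order arrival thus
 * appends in O(1), while tree depth grows with the number of runs
 * (i.e. holes), not the number of fragments.
 */
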
static void ip4_frag_init_run(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));

	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
					struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
{
	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	ip4_frag_init_run(skb);
	q->fragments_tail = skb;
	q->last_run_head = skb;
}
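
/* Added note: linking the new run as the right child of the previous
 * last_run_head is correct because this helper only runs for the first
 * fragment (empty tree) or for one that lies beyond every queued byte,
 * making it the rightmost key; rb_insert_color() then rebalances.
 */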

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u8		ecn; /* RFC3168 support */
	u16		max_df_size; /* largest frag with DF set seen */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}
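
/* Added note: ip4_frag_ecn() turns the two ECN bits of the TOS byte
 * into a one-hot value (1, 2, 4 or 8). ip_frag_queue() ORs these into
 * qp->ecn across all fragments, and ip_frag_reasm() runs the result
 * through ip_frag_ecn_table, which yields either the TOS bits to
 * restore or 0xff for invalid mixes (e.g. Not-ECT combined with CE).
 */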

static struct inet_frags ip4_frags;

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev);


static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct frag_v4_compare_key *key = a;

	q->key.v4 = *key;
	qp->ecn = 0;
	qp->peer = q->net->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
		NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}

/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q);
}

static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(struct timer_list *t)
{
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	const struct iphdr *iph;
	struct sk_buff *head = NULL;
	struct net *net;
	struct ipq *qp;
	int err;

	qp = container_of(frag, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

	if (!(qp->q.flags & INET_FRAG_FIRST_IN))
		goto out;

	/* sk_buff::dev and sk_buff::rbnode are unionized. So we
	 * pull the head out of the tree in order to be able to
	 * deal with head->dev.
	 */
	if (qp->q.fragments) {
		head = qp->q.fragments;
		qp->q.fragments = head->next;
	} else {
		head = skb_rb_first(&qp->q.rb_fragments);
		if (!head)
			goto out;
		if (FRAG_CB(head)->next_frag)
			rb_replace_node(&head->rbnode,
					&FRAG_CB(head)->next_frag->rbnode,
					&qp->q.rb_fragments);
		else
			rb_erase(&head->rbnode, &qp->q.rb_fragments);
		memset(&head->rbnode, 0, sizeof(head->rbnode));
		barrier();
	}
	if (head == qp->q.fragments_tail)
		qp->q.fragments_tail = NULL;

	sub_frag_mem_limit(qp->q.net, head->truesize);

	head->dev = dev_get_by_index_rcu(net, qp->iif);
	if (!head->dev)
		goto out;

	/* skb has no dst, perform route lookup again */
	iph = ip_hdr(head);
	err = ip_route_input_noref(head, iph->daddr, iph->saddr,
				   iph->tos, head->dev);
	if (err)
		goto out;

	/* Only an end host needs to send an ICMP
	 * "Fragment Reassembly Timeout" message, per RFC792.
	 */
	if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
	    (skb_rtable(head)->rt_type != RTN_LOCAL))
		goto out;

	spin_unlock(&qp->q.lock);
	icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
	goto out_rcu_unlock;

out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	kfree_skb(head);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct frag_v4_compare_key key = {
		.saddr = iph->saddr,
		.daddr = iph->daddr,
		.user = user,
		.vif = vif,
		.id = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &key);
	if (!q)
		return NULL;

	return container_of(q, struct ipq, q);
}

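/* Added note: inet_frag_find() looks the key up in the per-netns
 * rhashtable and, on a miss, allocates and inserts a fresh queue,
 * invoking the ip4_frag_init() constructor above; it returns NULL when
 * allocation fails or the namespace is over its memory threshold.
 */
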
/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.net->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments_tail && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

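/* Added illustration: peer->rid is a per-source-address counter bumped
 * once per arriving fragment, and qp->rid remembers its value for this
 * queue's previous fragment. With ipfrag_max_dist == 64 (the default
 * set in ipv4_frags_init_net()), a queue is reset if more than 64
 * fragments from the same peer arrived in between - a hint that the
 * 16-bit IP ID space may have wrapped and the queue is stale.
 */
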
static int ip_frag_reinit(struct ipq *qp)
{
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		refcount_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct rb_node **rbn, *parent;
	struct sk_buff *skb1, *prev_tail;
	struct net_device *dev;
	unsigned int fragsize;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

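	/* Added worked example: frag_off is a 16-bit field whose top
	 * three bits are flags and whose low 13 bits count 8-byte
	 * units. For ntohs(frag_off) == 0x2003, flags = 0x2000 (IP_MF,
	 * more fragments follow) and offset = 3 << 3 = 24 bytes into
	 * the original payload; a 13-bit offset can thus address up to
	 * 8191 * 8 = 65528 bytes.
	 */
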
	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto discard_qp;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto discard_qp;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto discard_qp;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto discard_qp;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto discard_qp;

	/* Note : skb->rbnode and skb->dev share the same location. */
	dev = skb->dev;
	/* Makes sure compiler wont do silly aliasing games */
	barrier();

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 * When reassembling an IPv6 datagram, if
	 * one or more its constituent fragments is determined to be an
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments) MUST be silently discarded.
	 *
	 * We do the same here for IPv4 (and increment an snmp counter).
	 */

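	/* Added historical note: earlier kernels trimmed overlapping
	 * IPv4 fragments instead of discarding the queue; the strict
	 * drop policy here arrived with the 2018 rb-tree rework of
	 * reassembly, hardening against overlap- and list-walk-based
	 * DoS (reportedly the "FragmentSmack" issue, CVE-2018-5391).
	 */
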
	err = -EINVAL;
	/* Find out where to put this fragment. */
	prev_tail = qp->q.fragments_tail;
	if (!prev_tail)
		ip4_frag_create_run(&qp->q, skb);  /* First fragment. */
	else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
			goto overlap;
		if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
			ip4_frag_append_to_last_run(&qp->q, skb);
		else
			ip4_frag_create_run(&qp->q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		rbn = &qp->q.rb_fragments.rb_node;
		do {
			parent = *rbn;
			skb1 = rb_to_skb(parent);
			if (end <= skb1->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= skb1->ip_defrag_offset +
					   FRAG_CB(skb1)->frag_run_len)
				rbn = &parent->rb_right;
			else /* Found an overlap with skb1. */
				goto overlap;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		ip4_frag_init_run(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
	}

	if (dev)
		qp->iif = dev->ifindex;
	skb->ip_defrag_offset = offset;

	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.net, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, skb, prev_tail, dev);
		skb->_skb_refdst = orefdst;
		if (err)
			inet_frag_kill(&qp->q);
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

overlap:
	__IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
discard_qp:
	inet_frag_kill(&qp->q);
err:
	kfree_skb(skb);
	return err;
}

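/* Added note: qp->q.meat counts payload bytes actually queued, while
 * qp->q.len is the total implied by the final fragment's offset; only
 * when both FIRST_IN and LAST_IN are set and meat == len is every hole
 * filled, which is what gates the ip_frag_reasm() call above.
 */
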
/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
	struct sk_buff **nextp; /* To build frag_list. */
	struct rb_node *rbn;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(prev_tail)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&qp->q.rb_fragments);
		if (qp->q.fragments_tail == skb)
			qp->q.fragments_tail = fp;
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&qp->q.rb_fragments);
		consume_skb(head);
		head = skb;
	}

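	/* Added note: ip_defrag() reassembles "in place" for its
	 * caller, so the caller's skb must end up holding the whole
	 * datagram. If the skb that completed the queue is not the
	 * first fragment, a clone takes its slot in the queue and
	 * skb_morph() turns skb into the head before the swap above.
	 */
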
	WARN_ON(head->ip_defrag_offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &qp->q.rb_fragments);
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			*nextp = fp;
			nextp = &fp->next;
			fp->prev = NULL;
			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
			fp->sk = NULL;
			head->data_len += fp->len;
			head->len += fp->len;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);
			head->truesize += fp->truesize;
			fp = FRAG_CB(fp)->next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &qp->q.rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(head);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct net_device *dev = skb->dev ? : skb_dst(skb)->dev;
	int vif = l3mdev_master_ifindex_rcu(dev);
	struct ipq *qp;

	__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
	skb_orphan(skb);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user, vif);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

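/* Added summary of the calling convention: ip_defrag() returns 0 when
 * skb now holds a fully reassembled datagram, -EINPROGRESS when the
 * fragment was queued and consumed, and another negative errno when it
 * was dropped (the skb is freed in that case too).
 */
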
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
				kfree_skb(skb);
				return NULL;
			}
			if (pskb_trim_rcsum(skb, netoff + len)) {
				kfree_skb(skb);
				return NULL;
			}
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(net, skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

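/* Added note: ip_check_defrag() is the lighter entry point for users
 * that may see shared skbs outside the normal receive path (e.g.
 * packet-socket fanout with PACKET_FANOUT_FLAG_DEFRAG, or macvlan); it
 * validates the header via skb_copy_bits() without assuming linear
 * data, and returns NULL while reassembly is still in progress.
 */
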
unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb(skb);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

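/* Added note: the purge mirrors the queue layout with a two-level
 * walk - the outer loop erases one run head per rb-tree node, the
 * inner loop frees that run's next_frag chain - and returns the summed
 * truesize so callers can credit it back to the frag memory accounting.
 */
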
#ifdef CONFIG_SYSCTL
static int dist_min;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &init_net.ipv4.frags.max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &dist_min,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[0].extra1 = &net->ipv4.frags.low_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[1].extra2 = &net->ipv4.frags.high_thresh;
		table[2].data = &net->ipv4.frags.timeout;
		table[3].data = &net->ipv4.frags.max_dist;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

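/* Added note: each non-init namespace gets a kmemdup()ed copy of the
 * template table with .data (and the bounds in extra1/extra2)
 * repointed at its own netns fields, so writing an ipfrag_* sysctl in
 * a container affects only that namespace.
 */
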
static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	int res;

	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code, (tries to) account for
	 * the real memory usage, by measuring both the size of frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragment consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * fragments 8x128k.
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh = 3 * 1024 * 1024;
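	/* Added arithmetic for the comment above: a 64KB datagram
	 * arriving in 1500-byte MTU fragments needs ~44 skbs; at a
	 * truesize of 2944 each, 44 * 2944 + 200 = 129736 bytes, so
	 * the 4MB threshold admits roughly 4194304 / 129736 ~= 32 such
	 * datagrams in flight per namespace.
	 */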
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC791 is wrong proposing to prolongate timer each fragment arrival
	 * by TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	net->ipv4.frags.max_dist = 64;
	net->ipv4.frags.f = &ip4_frags;

	res = inet_frags_init_net(&net->ipv4.frags);
	if (res < 0)
		return res;
	res = ip4_frags_ns_ctl_register(net);
	if (res < 0)
		inet_frags_exit_net(&net->ipv4.frags);
	return res;
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
{
	return jhash2(data,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	return jhash2((const u32 *)&fq->key.v4,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v4_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	return !!memcmp(&fq->key, key, sizeof(*key));
}

static const struct rhashtable_params ip4_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.key_offset		= offsetof(struct inet_frag_queue, key),
	.key_len		= sizeof(struct frag_v4_compare_key),
	.hashfn			= ip4_key_hashfn,
	.obj_hashfn		= ip4_obj_hashfn,
	.obj_cmpfn		= ip4_obj_cmpfn,
	.automatic_shrinking	= true,
};

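/* Added note: queues live in a per-netns rhashtable keyed by the
 * (saddr, daddr, id, protocol, user, vif) tuple. jhash2() hashes whole
 * 32-bit words, which is why the key is hashed as
 * sizeof(struct frag_v4_compare_key) / sizeof(u32) words; the
 * per-table seed keeps bucket placement unpredictable to remote
 * senders.
 */
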
void __init ipfrag_init(void)
{
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	ip4_frags.rhash_params = ip4_rhash_params;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
}