/* 6LoWPAN fragment reassembly
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "reassembly.h"

struct lowpan_frag_info {
        __be16 d_tag;
        u16 d_size;
        u8 d_offset;
};

static struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
{
        return (struct lowpan_frag_info *)skb->cb;
}
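
/* The RFC 4944 fragmentation fields (datagram_tag, datagram_size,
 * datagram_offset) are parked in the 48-byte skb->cb scratch area while
 * a fragment sits in the reassembly queue; lowpan_cb() is simply a
 * typed view of that space. d_offset keeps the wire encoding, in units
 * of 8 octets, so users shift it left by 3 to obtain a byte offset.
 */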

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
                             struct sk_buff *prev, struct net_device *dev);

static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
                                     const struct ieee802154_addr *saddr,
                                     const struct ieee802154_addr *daddr)
{
        u32 c;

        net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
        c = jhash_3words(ieee802154_addr_hash(saddr),
                         ieee802154_addr_hash(daddr),
                         (__force u32)(tag + (d_size << 16)),
                         lowpan_frags.rnd);

        return c & (INETFRAGS_HASHSZ - 1);
}
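
/* Illustration: the bucket index mixes both link-layer addresses with
 * the tag and size folded into one jhash word (tag in the low 16 bits,
 * size in the high 16), keyed by the boot-time random lowpan_frags.rnd
 * so remote senders cannot predict bucket placement.
 */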

static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
{
        const struct lowpan_frag_queue *fq;

        fq = container_of(q, struct lowpan_frag_queue, q);
        return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}

static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
{
        const struct lowpan_frag_queue *fq;
        const struct lowpan_create_arg *arg = a;

        fq = container_of(q, struct lowpan_frag_queue, q);
        return fq->tag == arg->tag && fq->d_size == arg->d_size &&
               ieee802154_addr_equal(&fq->saddr, arg->src) &&
               ieee802154_addr_equal(&fq->daddr, arg->dst);
}
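
/* A queue is keyed by the RFC 4944 reassembly tuple: the 16-bit
 * datagram_tag, the 11-bit datagram_size, and the link-layer source and
 * destination addresses. Fragments agreeing on all four belong to the
 * same original datagram.
 */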

static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
        const struct lowpan_create_arg *arg = a;
        struct lowpan_frag_queue *fq;

        fq = container_of(q, struct lowpan_frag_queue, q);

        fq->tag = arg->tag;
        fq->d_size = arg->d_size;
        fq->saddr = *arg->src;
        fq->daddr = *arg->dst;
}

static void lowpan_frag_expire(unsigned long data)
{
        struct frag_queue *fq;
        struct net *net;

        fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
        net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        inet_frag_kill(&fq->q, &lowpan_frags);
out:
        spin_unlock(&fq->q.lock);
        inet_frag_put(&fq->q, &lowpan_frags);
}
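
/* Expiry never frees fragments directly: inet_frag_kill() only
 * unhashes the queue and marks it complete, and the skb chain is
 * released together with the queue once the final inet_frag_put()
 * drops the refcount to zero.
 */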

static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
        const struct ieee802154_addr *src,
        const struct ieee802154_addr *dst)
{
        struct inet_frag_queue *q;
        struct lowpan_create_arg arg;
        unsigned int hash;
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);

        arg.tag = frag_info->d_tag;
        arg.d_size = frag_info->d_size;
        arg.src = src;
        arg.dst = dst;

        read_lock(&lowpan_frags.lock);
        hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

        q = inet_frag_find(&ieee802154_lowpan->frags,
                           &lowpan_frags, &arg, hash);
        if (IS_ERR_OR_NULL(q)) {
                inet_frag_maybe_warn_overflow(q, pr_fmt());
                return NULL;
        }
        return container_of(q, struct lowpan_frag_queue, q);
}
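
/* Minimal usage sketch, mirroring lowpan_frag_rcv() below (ret is
 * assumed to be declared by the caller):
 *
 *   fq = fq_find(net, frag_info, &mac_cb(skb)->source,
 *                &mac_cb(skb)->dest);
 *   if (fq) {
 *           spin_lock(&fq->q.lock);
 *           ret = lowpan_frag_queue(fq, skb, LOWPAN_DISPATCH_FRAGN);
 *           spin_unlock(&fq->q.lock);
 *           inet_frag_put(&fq->q, &lowpan_frags);
 *   }
 *
 * Note: in this era of the inet_frag API, inet_frag_find() releases
 * the read lock taken above before returning.
 */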

static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
                             struct sk_buff *skb, const u8 frag_type)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int end, offset;

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto err;

        offset = lowpan_cb(skb)->d_offset << 3;
        end = lowpan_cb(skb)->d_size;

        /* Is this the final fragment? */
        if (offset + skb->len == end) {
                /* If we already have some bits beyond end
                 * or have a different end, the fragment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        /* Find out where this fragment belongs in the chain collected
         * so far, i.e. which fragments sit in front of and behind it.
         */
        prev = fq->q.fragments_tail;
        if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
                next = NULL;
                goto found;
        }
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
                        break;  /* bingo! */
                prev = next;
        }

found:
        /* Insert this fragment into the chain. */
        skb->next = next;
        if (!next)
                fq->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev)
                skb->dev = NULL;

        fq->q.stamp = skb->tstamp;
        if (frag_type == LOWPAN_DISPATCH_FRAG1) {
                /* Account for the uncompressed 6LoWPAN header so meat
                 * reflects the full reassembled size.
                 */
                fq->q.meat += lowpan_uncompress_size(skb, NULL);
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        } else {
                fq->q.meat += skb->len;
        }
        add_frag_mem_limit(&fq->q, skb->truesize);

        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len) {
                int res;
                unsigned long orefdst = skb->_skb_refdst;

                skb->_skb_refdst = 0UL;
                res = lowpan_frag_reasm(fq, prev, dev);
                skb->_skb_refdst = orefdst;
                return res;
        }

        inet_frag_lru_move(&fq->q);
        return -1;
err:
        kfree_skb(skb);
        return -1;
}
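
/* Worked example of the offset math above: a FRAGN header carrying
 * datagram_offset = 12 gives d_offset = 12, i.e. a byte offset of
 * 12 << 3 = 96 into the original datagram. Reassembly fires only once
 * FIRST_IN and LAST_IN are both set and the accumulated meat equals
 * the datagram size recorded in fq->q.len.
 */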

/* Check if this packet is complete.
 *
 * Returns 1 when reassembly succeeded and -1 on failure (for any
 * reason, e.g. out of memory).
 *
 * It is called with a locked fq, and the caller must check that the
 * queue is eligible for reassembly, i.e. it is not COMPLETE, both the
 * first and the last fragments have arrived, and all the bits are here.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
                             struct net_device *dev)
{
        struct sk_buff *fp, *head = fq->q.fragments;
        int sum_truesize;

        inet_frag_kill(&fq->q, &lowpan_frags);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                if (!fp->next)
                        fq->q.fragments_tail = fp;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                consume_skb(fq->q.fragments);
                fq->q.fragments = head;
        }

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with data and paged part
         * and the second, holding only fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                clone = alloc_skb(0, GFP_ATOMIC);
                if (!clone)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->len = head->data_len - plen;
                clone->data_len = clone->len;
                head->data_len -= clone->len;
                head->len -= clone->len;
                add_frag_mem_limit(&fq->q, clone->truesize);
        }

        WARN_ON(head == NULL);

        sum_truesize = head->truesize;
        for (fp = head->next; fp;) {
                bool headstolen;
                int delta;
                struct sk_buff *next = fp->next;

                sum_truesize += fp->truesize;
                if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
                        kfree_skb_partial(fp, headstolen);
                } else {
                        if (!skb_shinfo(head)->frag_list)
                                skb_shinfo(head)->frag_list = fp;
                        head->data_len += fp->len;
                        head->len += fp->len;
                        head->truesize += fp->truesize;
                }
                fp = next;
        }
        sub_frag_mem_limit(&fq->q, sum_truesize);

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;

        fq->q.fragments = NULL;
        fq->q.fragments_tail = NULL;

        return 1;
out_oom:
        net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
        return -1;
}
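
/* On success the fragment received last has become the head of the
 * rebuilt chain: pieces are coalesced in place where skb_try_coalesce()
 * succeeded and chained via frag_list otherwise, with dev and tstamp
 * restored from the queue.
 */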

static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
                                struct lowpan_frag_info *frag_info)
{
        bool fail;
        u8 pattern = 0, low = 0;

        fail = lowpan_fetch_skb(skb, &pattern, 1);
        fail |= lowpan_fetch_skb(skb, &low, 1);
        frag_info->d_size = (pattern & 7) << 8 | low;
        fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);

        if (frag_type == LOWPAN_DISPATCH_FRAGN) {
                fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
        } else {
                skb_reset_network_header(skb);
                frag_info->d_offset = 0;
        }

        if (unlikely(fail))
                return -EIO;

        return 0;
}
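
/* Wire layout parsed above (RFC 4944, section 5.3):
 *
 *   FRAG1:  | 1 1 0 0 0 | datagram_size (11 bits) | datagram_tag (16 bits) |
 *   FRAGN:  | 1 1 1 0 0 | datagram_size (11 bits) | datagram_tag (16 bits) |
 *           | datagram_offset (8 bits, units of 8 octets) |
 *
 * Example: the FRAGN bytes e1 23 ab cd 0c decode to d_size = 0x123
 * (291 bytes), d_tag = 0xabcd and d_offset = 0x0c (byte offset 96).
 */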

int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
        struct lowpan_frag_queue *fq;
        struct net *net = dev_net(skb->dev);
        struct lowpan_frag_info *frag_info = lowpan_cb(skb);
        struct ieee802154_addr source, dest;
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);
        int err;

        source = mac_cb(skb)->source;
        dest = mac_cb(skb)->dest;

        err = lowpan_get_frag_info(skb, frag_type, frag_info);
        if (err < 0)
                goto err;

        if (frag_info->d_size > ieee802154_lowpan->max_dsize)
                goto err;

        inet_frag_evictor(&ieee802154_lowpan->frags, &lowpan_frags, false);

        fq = fq_find(net, frag_info, &source, &dest);
        if (fq != NULL) {
                int ret;

                spin_lock(&fq->q.lock);
                ret = lowpan_frag_queue(fq, skb, frag_type);
                spin_unlock(&fq->q.lock);

                inet_frag_put(&fq->q, &lowpan_frags);
                return ret;
        }

err:
        kfree_skb(skb);
        return -1;
}
EXPORT_SYMBOL(lowpan_frag_rcv);
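
/* Return convention: 1 once the final fragment completes reassembly
 * (the skb passed in then carries the whole datagram), -1 while
 * fragments are still outstanding or on error; on the error paths the
 * skb has already been consumed by kfree_skb().
 */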

#ifdef CONFIG_SYSCTL
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
        {
                .procname       = "6lowpanfrag_high_thresh",
                .data           = &init_net.ieee802154_lowpan.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "6lowpanfrag_low_thresh",
                .data           = &init_net.ieee802154_lowpan.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        {
                .procname       = "6lowpanfrag_time",
                .data           = &init_net.ieee802154_lowpan.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        {
                .procname       = "6lowpanfrag_max_datagram_size",
                .data           = &init_net.ieee802154_lowpan.max_dsize,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        { }
};

static struct ctl_table lowpan_frags_ctl_table[] = {
        {
                .procname       = "6lowpanfrag_secret_interval",
                .data           = &lowpan_frags.secret_interval,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};
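
/* These knobs appear under /proc/sys/net/ieee802154/6lowpan/, e.g.
 *
 *   echo 262144 > /proc/sys/net/ieee802154/6lowpan/6lowpanfrag_high_thresh
 *
 * The per-namespace table is duplicated for every non-init namespace in
 * lowpan_frags_ns_sysctl_register() below.
 */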

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);

        table = lowpan_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
                                GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &ieee802154_lowpan->frags.high_thresh;
                table[1].data = &ieee802154_lowpan->frags.low_thresh;
                table[2].data = &ieee802154_lowpan->frags.timeout;
                table[3].data = &ieee802154_lowpan->max_dsize;

                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        table[0].procname = NULL;
        }

        hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
        if (hdr == NULL)
                goto err_reg;

        ieee802154_lowpan->sysctl.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
        struct ctl_table *table;
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);

        table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int lowpan_frags_sysctl_register(void)
{
        lowpan_ctl_header = register_net_sysctl(&init_net,
                                                "net/ieee802154/6lowpan",
                                                lowpan_frags_ctl_table);
        return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
        unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
        return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int lowpan_frags_sysctl_register(void)
{
        return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init lowpan_frags_init_net(struct net *net)
{
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);

        ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
        ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
        ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
        ieee802154_lowpan->max_dsize = 0xFFFF;

        inet_frags_init_net(&ieee802154_lowpan->frags);

        return lowpan_frags_ns_sysctl_register(net);
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);

        lowpan_frags_ns_sysctl_unregister(net);
        inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
}

static struct pernet_operations lowpan_frags_ops = {
        .init = lowpan_frags_init_net,
        .exit = lowpan_frags_exit_net,
};

int __init lowpan_net_frag_init(void)
{
        int ret;

        ret = lowpan_frags_sysctl_register();
        if (ret)
                return ret;

        ret = register_pernet_subsys(&lowpan_frags_ops);
        if (ret)
                goto err_pernet;

        lowpan_frags.hashfn = lowpan_hashfn;
        lowpan_frags.constructor = lowpan_frag_init;
        lowpan_frags.destructor = NULL;
        lowpan_frags.skb_free = NULL;
        lowpan_frags.qsize = sizeof(struct frag_queue);
        lowpan_frags.match = lowpan_frag_match;
        lowpan_frags.frag_expire = lowpan_frag_expire;
        lowpan_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&lowpan_frags);

        return ret;
err_pernet:
        lowpan_frags_sysctl_unregister();
        return ret;
}
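
/* Setup order above: register the global sysctl first, then the pernet
 * ops (which seed each namespace's thresholds and timeout), and only
 * then inet_frags_init(), which consumes the callbacks and
 * secret_interval configured just before it.
 */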

void lowpan_net_frag_exit(void)
{
        inet_frags_fini(&lowpan_frags);
        lowpan_frags_sysctl_unregister();
        unregister_pernet_subsys(&lowpan_frags_ops);
}