/* 6LoWPAN fragment reassembly
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6_frag.h>
#include <net/inet_frag.h>

#include "6lowpan_i.h"

static const char lowpan_frags_cache_name[] = "lowpan-frags";

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
                             struct sk_buff *prev, struct net_device *ldev);

static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
        const struct frag_lowpan_compare_key *key = a;
        struct lowpan_frag_queue *fq;

        fq = container_of(q, struct lowpan_frag_queue, q);

        BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
        memcpy(&q->key, key, sizeof(*key));
}

static void lowpan_frag_expire(struct timer_list *t)
{
        struct inet_frag_queue *frag = from_timer(frag, t, timer);
        struct frag_queue *fq;
        struct net *net;

        fq = container_of(frag, struct frag_queue, q);
        net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

        spin_lock(&fq->q.lock);

        if (fq->q.flags & INET_FRAG_COMPLETE)
                goto out;

        inet_frag_kill(&fq->q);
out:
        spin_unlock(&fq->q.lock);
        inet_frag_put(&fq->q);
}

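/* Look up the reassembly queue for this datagram. Per RFC 4944, fragments
 * belong to the same datagram when they share the link-layer source and
 * destination addresses, the datagram_size and the datagram_tag, so those
 * four values make up the lookup key; inet_frag_find() returns the matching
 * queue or allocates a new one on a miss.
 */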
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_802154_cb *cb,
        const struct ieee802154_addr *src,
        const struct ieee802154_addr *dst)
{
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);
        struct frag_lowpan_compare_key key = {};
        struct inet_frag_queue *q;

        key.tag = cb->d_tag;
        key.d_size = cb->d_size;
        key.src = *src;
        key.dst = *dst;

        q = inet_frag_find(&ieee802154_lowpan->frags, &key);
        if (!q)
                return NULL;

        return container_of(q, struct lowpan_frag_queue, q);
}

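/* Queue one fragment on @fq. The fragment list is kept sorted by offset;
 * d_offset is carried on the wire in units of 8 octets, hence the << 3
 * below. Once both the first and the last fragments have been seen and the
 * accumulated payload (q.meat) matches the announced datagram size,
 * reassembly is triggered via lowpan_frag_reasm().
 */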
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
                             struct sk_buff *skb, u8 frag_type)
{
        struct sk_buff *prev, *next;
        struct net_device *ldev;
        int end, offset;

        if (fq->q.flags & INET_FRAG_COMPLETE)
                goto err;

        offset = lowpan_802154_cb(skb)->d_offset << 3;
        end = lowpan_802154_cb(skb)->d_size;

        /* Is this the final fragment? */
        if (offset + skb->len == end) {
                /* If we already have some bits beyond end,
                 * or have a different end, the fragment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.flags |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.flags & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far. We must know where to put
         * this fragment, right?
         */
        prev = fq->q.fragments_tail;
        if (!prev ||
            lowpan_802154_cb(prev)->d_offset <
            lowpan_802154_cb(skb)->d_offset) {
                next = NULL;
                goto found;
        }
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (lowpan_802154_cb(next)->d_offset >=
                    lowpan_802154_cb(skb)->d_offset)
                        break; /* bingo! */
                prev = next;
        }

found:
        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (!next)
                fq->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        ldev = skb->dev;
        if (ldev)
                skb->dev = NULL;

        fq->q.stamp = skb->tstamp;
        if (frag_type == LOWPAN_DISPATCH_FRAG1)
                fq->q.flags |= INET_FRAG_FIRST_IN;

        fq->q.meat += skb->len;
        add_frag_mem_limit(fq->q.net, skb->truesize);

        if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len) {
                int res;
                unsigned long orefdst = skb->_skb_refdst;

                skb->_skb_refdst = 0UL;
                res = lowpan_frag_reasm(fq, prev, ldev);
                skb->_skb_refdst = orefdst;
                return res;
        }

        return -1;
err:
        kfree_skb(skb);
        return -1;
}

/* Check if this packet is complete.
 *
 * It is called with locked fq, and the caller must check that the
 * queue is eligible for reassembly, i.e. it is not COMPLETE,
 * both the first and the last fragments have arrived and all the
 * data is here.
 *
 * Returns 1 once the datagram has been reassembled into the head skb,
 * or -1 on failure.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
                             struct net_device *ldev)
{
        struct sk_buff *fp, *head = fq->q.fragments;
        int sum_truesize;

        inet_frag_kill(&fq->q);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                if (!fp->next)
                        fq->q.fragments_tail = fp;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                consume_skb(fq->q.fragments);
                fq->q.fragments = head;
        }

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split it into
         * two chunks: the first with the linear data and paged part,
         * and the second holding only the frag_list fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                clone = alloc_skb(0, GFP_ATOMIC);
                if (!clone)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->len = head->data_len - plen;
                clone->data_len = clone->len;
                head->data_len -= clone->len;
                head->len -= clone->len;
                add_frag_mem_limit(fq->q.net, clone->truesize);
        }

        WARN_ON(head == NULL);

        sum_truesize = head->truesize;
        for (fp = head->next; fp;) {
                bool headstolen;
                int delta;
                struct sk_buff *next = fp->next;

                sum_truesize += fp->truesize;
                if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
                        kfree_skb_partial(fp, headstolen);
                } else {
                        if (!skb_shinfo(head)->frag_list)
                                skb_shinfo(head)->frag_list = fp;
                        head->data_len += fp->len;
                        head->len += fp->len;
                        head->truesize += fp->truesize;
                }
                fp = next;
        }
        sub_frag_mem_limit(fq->q.net, sum_truesize);

        head->next = NULL;
        head->dev = ldev;
        head->tstamp = fq->q.stamp;

        fq->q.fragments = NULL;
        fq->q.fragments_tail = NULL;

        return 1;
out_oom:
        net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
        return -1;
}

static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
                                          lowpan_rx_result res)
{
        switch (res) {
        case RX_QUEUED:
                return NET_RX_SUCCESS;
        case RX_CONTINUE:
                /* nobody cared about this packet */
                net_warn_ratelimited("%s: received unknown dispatch\n",
                                     __func__);

                /* fall-through */
        default:
                /* all other results are failures */
                return NET_RX_DROP;
        }
}

static lowpan_rx_result lowpan_frag_rx_h_iphc(struct sk_buff *skb)
{
        int ret;

        if (!lowpan_is_iphc(*skb_network_header(skb)))
                return RX_CONTINUE;

        ret = lowpan_iphc_decompress(skb);
        if (ret < 0)
                return RX_DROP;

        return RX_QUEUED;
}

static int lowpan_invoke_frag_rx_handlers(struct sk_buff *skb)
{
        lowpan_rx_result res;

#define CALL_RXH(rxh)                   \
        do {                            \
                res = rxh(skb);         \
                if (res != RX_CONTINUE) \
                        goto rxh_next;  \
        } while (0)

        /* likely at first */
        CALL_RXH(lowpan_frag_rx_h_iphc);
        CALL_RXH(lowpan_rx_h_ipv6);

rxh_next:
        return lowpan_frag_rx_handlers_result(skb, res);
#undef CALL_RXH
}

#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK        0x07
#define LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT       8

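/* For reference, the first-fragment (FRAG1) header per RFC 4944, section 5.3:
 *
 *                      1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |1 1 0 0 0|    datagram_size    |         datagram_tag          |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * Subsequent fragments (FRAGN, dispatch 11100) carry the same fields
 * followed by an 8-bit datagram_offset in units of 8 octets. The
 * datagram_size is 11 bits wide: the low three bits of the first byte
 * are its high bits, which is what the mask/shift above extract.
 */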
static int lowpan_get_cb(struct sk_buff *skb, u8 frag_type,
                         struct lowpan_802154_cb *cb)
{
        bool fail;
        u8 high = 0, low = 0;
        __be16 d_tag = 0;

        fail = lowpan_fetch_skb(skb, &high, 1);
        fail |= lowpan_fetch_skb(skb, &low, 1);
        /* mask off the dispatch value and use the first byte's low three
         * bits as the high bits of the datagram size
         */
        cb->d_size = (high & LOWPAN_FRAG_DGRAM_SIZE_HIGH_MASK) <<
                LOWPAN_FRAG_DGRAM_SIZE_HIGH_SHIFT | low;
        fail |= lowpan_fetch_skb(skb, &d_tag, 2);
        cb->d_tag = ntohs(d_tag);

        if (frag_type == LOWPAN_DISPATCH_FRAGN) {
                fail |= lowpan_fetch_skb(skb, &cb->d_offset, 1);
        } else {
                skb_reset_network_header(skb);
                cb->d_offset = 0;
                /* check that datagram_size can hold at least an ipv6hdr on FRAG1 */
                fail |= cb->d_size < sizeof(struct ipv6hdr);
                /* check that we can dereference the dispatch value */
                fail |= !skb->len;
        }

        if (unlikely(fail))
                return -EIO;

        return 0;
}

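/* Entry point for FRAG1/FRAGN frames on the 6LoWPAN receive path: peek the
 * link-layer addresses, pull the fragment header into the skb control block,
 * decompress the IPHC header on FRAG1, drop datagrams that announce a size
 * larger than IPV6_MIN_MTU, and finally queue the fragment on the matching
 * reassembly queue.
 */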
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
        struct lowpan_frag_queue *fq;
        struct net *net = dev_net(skb->dev);
        struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
        struct ieee802154_hdr hdr = {};
        int err;

        if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
                goto err;

        err = lowpan_get_cb(skb, frag_type, cb);
        if (err < 0)
                goto err;

        if (frag_type == LOWPAN_DISPATCH_FRAG1) {
                err = lowpan_invoke_frag_rx_handlers(skb);
                if (err == NET_RX_DROP)
                        goto err;
        }

        if (cb->d_size > IPV6_MIN_MTU) {
                net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
                goto err;
        }

        fq = fq_find(net, cb, &hdr.source, &hdr.dest);
        if (fq != NULL) {
                int ret;

                spin_lock(&fq->q.lock);
                ret = lowpan_frag_queue(fq, skb, frag_type);
                spin_unlock(&fq->q.lock);

                inet_frag_put(&fq->q);
                return ret;
        }

err:
        kfree_skb(skb);
        return -1;
}

#ifdef CONFIG_SYSCTL

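/* Per-namespace tunables, exposed under /proc/sys/net/ieee802154/6lowpan/:
 * 6lowpanfrag_high_thresh, 6lowpanfrag_low_thresh and 6lowpanfrag_time are
 * the 6LoWPAN counterparts of the ipfrag_* knobs and feed the shared
 * inet_frag machinery (memory thresholds and reassembly timeout).
 */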
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
        {
                .procname       = "6lowpanfrag_high_thresh",
                .data           = &init_net.ieee802154_lowpan.frags.high_thresh,
                .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
                .extra1         = &init_net.ieee802154_lowpan.frags.low_thresh
        },
        {
                .procname       = "6lowpanfrag_low_thresh",
                .data           = &init_net.ieee802154_lowpan.frags.low_thresh,
                .maxlen         = sizeof(unsigned long),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
                .extra2         = &init_net.ieee802154_lowpan.frags.high_thresh
        },
        {
                .procname       = "6lowpanfrag_time",
                .data           = &init_net.ieee802154_lowpan.frags.timeout,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

/* secret interval has been deprecated */
static int lowpan_frags_secret_interval_unused;
static struct ctl_table lowpan_frags_ctl_table[] = {
        {
                .procname       = "6lowpanfrag_secret_interval",
                .data           = &lowpan_frags_secret_interval_unused,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
        { }
};

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);

        table = lowpan_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
                                GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &ieee802154_lowpan->frags.high_thresh;
                table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
                table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
                table[1].data = &ieee802154_lowpan->frags.low_thresh;
                table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
                table[2].data = &ieee802154_lowpan->frags.timeout;

                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        table[0].procname = NULL;
        }

        hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
        if (hdr == NULL)
                goto err_reg;

        ieee802154_lowpan->sysctl.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
        struct ctl_table *table;
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);

        table = ieee802154_lowpan->sysctl.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(ieee802154_lowpan->sysctl.frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int __init lowpan_frags_sysctl_register(void)
{
        lowpan_ctl_header = register_net_sysctl(&init_net,
                                                "net/ieee802154/6lowpan",
                                                lowpan_frags_ctl_table);
        return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
        unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
        return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int __init lowpan_frags_sysctl_register(void)
{
        return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init lowpan_frags_init_net(struct net *net)
{
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);
        int res;

        ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
        ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
        ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
        ieee802154_lowpan->frags.f = &lowpan_frags;

        res = inet_frags_init_net(&ieee802154_lowpan->frags);
        if (res < 0)
                return res;
        res = lowpan_frags_ns_sysctl_register(net);
        if (res < 0)
                inet_frags_exit_net(&ieee802154_lowpan->frags);
        return res;
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
        struct netns_ieee802154_lowpan *ieee802154_lowpan =
                net_ieee802154_lowpan(net);

        lowpan_frags_ns_sysctl_unregister(net);
        inet_frags_exit_net(&ieee802154_lowpan->frags);
}

static struct pernet_operations lowpan_frags_ops = {
        .init = lowpan_frags_init_net,
        .exit = lowpan_frags_exit_net,
};

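/* The rhashtable keys reassembly queues by the full frag_lowpan_compare_key,
 * so a lookup key and the key embedded in a stored inet_frag_queue must hash
 * identically; both helpers therefore run jhash2 over the same number of
 * 32-bit words.
 */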
static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
{
        return jhash2(data,
                      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}

static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
{
        const struct inet_frag_queue *fq = data;

        return jhash2((const u32 *)&fq->key,
                      sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
}

static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
        const struct frag_lowpan_compare_key *key = arg->key;
        const struct inet_frag_queue *fq = ptr;

        return !!memcmp(&fq->key, key, sizeof(*key));
}

static const struct rhashtable_params lowpan_rhash_params = {
        .head_offset            = offsetof(struct inet_frag_queue, node),
        .hashfn                 = lowpan_key_hashfn,
        .obj_hashfn             = lowpan_obj_hashfn,
        .obj_cmpfn              = lowpan_obj_cmpfn,
        .automatic_shrinking    = true,
};

int __init lowpan_net_frag_init(void)
{
        int ret;

        lowpan_frags.constructor = lowpan_frag_init;
        lowpan_frags.destructor = NULL;
        lowpan_frags.qsize = sizeof(struct frag_queue);
        lowpan_frags.frag_expire = lowpan_frag_expire;
        lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
        lowpan_frags.rhash_params = lowpan_rhash_params;
        ret = inet_frags_init(&lowpan_frags);
        if (ret)
                goto out;

        ret = lowpan_frags_sysctl_register();
        if (ret)
                goto err_sysctl;

        ret = register_pernet_subsys(&lowpan_frags_ops);
        if (ret)
                goto err_pernet;
out:
        return ret;
err_pernet:
        lowpan_frags_sysctl_unregister();
err_sysctl:
        inet_frags_fini(&lowpan_frags);
        return ret;
}

void lowpan_net_frag_exit(void)
{
        inet_frags_fini(&lowpan_frags);
        lowpan_frags_sysctl_unregister();
        unregister_pernet_subsys(&lowpan_frags_ops);
}