net/ieee802154/reassembly.c
/* 6LoWPAN fragment reassembly
 *
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/ipv6/reassembly.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "6LoWPAN: " fmt

#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/export.h>

#include <net/ieee802154_netdev.h>
#include <net/6lowpan.h>
#include <net/ipv6.h>
#include <net/inet_frag.h>

#include "reassembly.h"

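/* Per-fragment info kept in skb->cb. The fields mirror the RFC 4944
 * fragmentation header: the 16-bit datagram tag, the 11-bit datagram size
 * and the fragment offset expressed in 8-octet units.
 */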
struct lowpan_frag_info {
        __be16 d_tag;
        u16 d_size;
        u8 d_offset;
};

struct lowpan_frag_info *lowpan_cb(struct sk_buff *skb)
{
        return (struct lowpan_frag_info *)skb->cb;
}

static struct inet_frags lowpan_frags;

static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
                             struct sk_buff *prev, struct net_device *dev);

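/* Bucket a reassembly queue by source, destination, tag and datagram size,
 * mixed with a random seed, into one of INETFRAGS_HASHSZ hash buckets.
 */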
static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
                                     const struct ieee802154_addr *saddr,
                                     const struct ieee802154_addr *daddr)
{
        u32 c;

        net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
        c = jhash_3words(ieee802154_addr_hash(saddr),
                         ieee802154_addr_hash(daddr),
                         (__force u32)(tag + (d_size << 16)),
                         lowpan_frags.rnd);

        return c & (INETFRAGS_HASHSZ - 1);
}

static unsigned int lowpan_hashfn(struct inet_frag_queue *q)
{
        struct lowpan_frag_queue *fq;

        fq = container_of(q, struct lowpan_frag_queue, q);
        return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
}

static bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
{
        struct lowpan_frag_queue *fq;
        struct lowpan_create_arg *arg = a;

        fq = container_of(q, struct lowpan_frag_queue, q);
        return fq->tag == arg->tag && fq->d_size == arg->d_size &&
               ieee802154_addr_equal(&fq->saddr, arg->src) &&
               ieee802154_addr_equal(&fq->daddr, arg->dst);
}

static void lowpan_frag_init(struct inet_frag_queue *q, void *a)
{
        struct lowpan_frag_queue *fq;
        struct lowpan_create_arg *arg = a;

        fq = container_of(q, struct lowpan_frag_queue, q);

        fq->tag = arg->tag;
        fq->d_size = arg->d_size;
        fq->saddr = *arg->src;
        fq->daddr = *arg->dst;
}

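/* Timer callback: the reassembly timeout expired before all fragments
 * arrived. Kill the queue (unless it already completed) and drop the
 * timer's reference to it.
 */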
static void lowpan_frag_expire(unsigned long data)
{
        struct frag_queue *fq;
        struct net *net;

        fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
        net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto out;

        inet_frag_kill(&fq->q, &lowpan_frags);
out:
        spin_unlock(&fq->q.lock);
        inet_frag_put(&fq->q, &lowpan_frags);
}

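/* Look up the reassembly queue matching (tag, size, src, dst), creating a
 * new one if none exists yet. Returns NULL if the queue cannot be found or
 * created.
 */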
static inline struct lowpan_frag_queue *
fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
        const struct ieee802154_addr *src,
        const struct ieee802154_addr *dst)
{
        struct inet_frag_queue *q;
        struct lowpan_create_arg arg;
        unsigned int hash;

        arg.tag = frag_info->d_tag;
        arg.d_size = frag_info->d_size;
        arg.src = src;
        arg.dst = dst;

        read_lock(&lowpan_frags.lock);
        hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);

        q = inet_frag_find(&net->ieee802154_lowpan.frags,
                           &lowpan_frags, &arg, hash);
        if (IS_ERR_OR_NULL(q)) {
                inet_frag_maybe_warn_overflow(q, pr_fmt());
                return NULL;
        }
        return container_of(q, struct lowpan_frag_queue, q);
}

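/* Queue one fragment. RFC 4944 expresses the fragment offset in 8-octet
 * units, hence the "<< 3" below. The first fragment (FRAG1) carries a
 * compressed 6LoWPAN header, so its contribution to the reassembled length
 * is estimated with lowpan_uncompress_size() rather than skb->len.
 */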
static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
                             struct sk_buff *skb, const u8 frag_type)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int end, offset;

        if (fq->q.last_in & INET_FRAG_COMPLETE)
                goto err;

        offset = lowpan_cb(skb)->d_offset << 3;
        end = lowpan_cb(skb)->d_size;

        /* Is this the final fragment? */
        if (offset + skb->len == end) {
                /* If we already have some bits beyond end
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far. We must know where to put
         * this fragment, right?
         */
        prev = fq->q.fragments_tail;
        if (!prev || lowpan_cb(prev)->d_offset < lowpan_cb(skb)->d_offset) {
                next = NULL;
                goto found;
        }
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (lowpan_cb(next)->d_offset >= lowpan_cb(skb)->d_offset)
                        break;  /* bingo! */
                prev = next;
        }

found:
        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (!next)
                fq->q.fragments_tail = skb;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev)
                skb->dev = NULL;

        fq->q.stamp = skb->tstamp;
        if (frag_type == LOWPAN_DISPATCH_FRAG1) {
                /* Calculate uncomp. 6lowpan header to estimate full size */
                fq->q.meat += lowpan_uncompress_size(skb, NULL);
                fq->q.last_in |= INET_FRAG_FIRST_IN;
        } else {
                fq->q.meat += skb->len;
        }
        add_frag_mem_limit(&fq->q, skb->truesize);

        if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len) {
                int res;
                unsigned long orefdst = skb->_skb_refdst;

                skb->_skb_refdst = 0UL;
                res = lowpan_frag_reasm(fq, prev, dev);
                skb->_skb_refdst = orefdst;
                return res;
        }

        inet_frag_lru_move(&fq->q);
        return -1;
err:
        kfree_skb(skb);
        return -1;
}

/* Check if this packet is complete.
 *
 * It is called with locked fq, and caller must check that
 * the queue is eligible for reassembly, i.e. it is not COMPLETE,
 * the last and the first frames have arrived and all the bits are here.
 *
 * Returns 1 when the frame has been successfully reassembled,
 * -1 on failure.
 */
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
                             struct net_device *dev)
{
        struct sk_buff *fp, *head = fq->q.fragments;
        int sum_truesize;

        inet_frag_kill(&fq->q, &lowpan_frags);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                if (!fp->next)
                        fq->q.fragments_tail = fp;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                consume_skb(fq->q.fragments);
                fq->q.fragments = head;
        }

        /* Head of list must not be cloned. */
        if (skb_unclone(head, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it to two chunks: the first with data and paged part
         * and the second, holding only fragments.
         */
        if (skb_has_frag_list(head)) {
                struct sk_buff *clone;
                int i, plen = 0;

                clone = alloc_skb(0, GFP_ATOMIC);
                if (!clone)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_frag_list_init(head);
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
                clone->len = head->data_len - plen;
                clone->data_len = clone->len;
                head->data_len -= clone->len;
                head->len -= clone->len;
                add_frag_mem_limit(&fq->q, clone->truesize);
        }

        WARN_ON(head == NULL);

        sum_truesize = head->truesize;
        for (fp = head->next; fp;) {
                bool headstolen;
                int delta;
                struct sk_buff *next = fp->next;

                sum_truesize += fp->truesize;
                if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
                        kfree_skb_partial(fp, headstolen);
                } else {
                        if (!skb_shinfo(head)->frag_list)
                                skb_shinfo(head)->frag_list = fp;
                        head->data_len += fp->len;
                        head->len += fp->len;
                        head->truesize += fp->truesize;
                }
                fp = next;
        }
        sub_frag_mem_limit(&fq->q, sum_truesize);

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;

        fq->q.fragments = NULL;
        fq->q.fragments_tail = NULL;

        return 1;
out_oom:
        net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
        return -1;
}

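/* Parse the RFC 4944 fragmentation header: the first two octets carry the
 * 5-bit dispatch value and the 11-bit datagram size, followed by the 16-bit
 * datagram tag. FRAGN frames additionally carry an 8-bit offset in 8-octet
 * units.
 */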
static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
                                struct lowpan_frag_info *frag_info)
{
        bool fail;
        u8 pattern = 0, low = 0;

        fail = lowpan_fetch_skb(skb, &pattern, 1);
        fail |= lowpan_fetch_skb(skb, &low, 1);
        frag_info->d_size = (pattern & 7) << 8 | low;
        fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);

        if (frag_type == LOWPAN_DISPATCH_FRAGN) {
                fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
        } else {
                skb_reset_network_header(skb);
                frag_info->d_offset = 0;
        }

        if (unlikely(fail))
                return -EIO;

        return 0;
}

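/* Receive one FRAG1/FRAGN frame. Returns 1 once the final fragment has
 * arrived and the datagram was reassembled into skb, -1 otherwise (fragment
 * queued, or dropped on error).
 */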
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
        struct lowpan_frag_queue *fq;
        struct net *net = dev_net(skb->dev);
        struct lowpan_frag_info *frag_info = lowpan_cb(skb);
        struct ieee802154_addr source, dest;
        int err;

        source = mac_cb(skb)->source;
        dest = mac_cb(skb)->dest;

        err = lowpan_get_frag_info(skb, frag_type, frag_info);
        if (err < 0)
                goto err;

        if (frag_info->d_size > net->ieee802154_lowpan.max_dsize)
                goto err;

        inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false);

        fq = fq_find(net, frag_info, &source, &dest);
        if (fq != NULL) {
                int ret;

                spin_lock(&fq->q.lock);
                ret = lowpan_frag_queue(fq, skb, frag_type);
                spin_unlock(&fq->q.lock);

                inet_frag_put(&fq->q, &lowpan_frags);
                return ret;
        }

err:
        kfree_skb(skb);
        return -1;
}
EXPORT_SYMBOL(lowpan_frag_rcv);

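/* Sysctls under net/ieee802154/6lowpan: per-namespace memory thresholds for
 * the fragment queues, the reassembly timeout and the maximum accepted
 * datagram size, plus a global knob for the hash secret rebuild interval.
 */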
#ifdef CONFIG_SYSCTL
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
        {
                .procname     = "6lowpanfrag_high_thresh",
                .data         = &init_net.ieee802154_lowpan.frags.high_thresh,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = proc_dointvec
        },
        {
                .procname     = "6lowpanfrag_low_thresh",
                .data         = &init_net.ieee802154_lowpan.frags.low_thresh,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = proc_dointvec
        },
        {
                .procname     = "6lowpanfrag_time",
                .data         = &init_net.ieee802154_lowpan.frags.timeout,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = proc_dointvec_jiffies,
        },
        {
                .procname     = "6lowpanfrag_max_datagram_size",
                .data         = &init_net.ieee802154_lowpan.max_dsize,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = proc_dointvec
        },
        { }
};

static struct ctl_table lowpan_frags_ctl_table[] = {
        {
                .procname     = "6lowpanfrag_secret_interval",
                .data         = &lowpan_frags.secret_interval,
                .maxlen       = sizeof(int),
                .mode         = 0644,
                .proc_handler = proc_dointvec_jiffies,
        },
        { }
};

static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
{
        struct ctl_table *table;
        struct ctl_table_header *hdr;

        table = lowpan_frags_ns_ctl_table;
        if (!net_eq(net, &init_net)) {
                table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
                                GFP_KERNEL);
                if (table == NULL)
                        goto err_alloc;

                table[0].data = &net->ieee802154_lowpan.frags.high_thresh;
                table[1].data = &net->ieee802154_lowpan.frags.low_thresh;
                table[2].data = &net->ieee802154_lowpan.frags.timeout;
                table[3].data = &net->ieee802154_lowpan.max_dsize;

                /* Don't export sysctls to unprivileged users */
                if (net->user_ns != &init_user_ns)
                        table[0].procname = NULL;
        }

        hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
        if (hdr == NULL)
                goto err_reg;

        net->ieee802154_lowpan.sysctl.frags_hdr = hdr;
        return 0;

err_reg:
        if (!net_eq(net, &init_net))
                kfree(table);
err_alloc:
        return -ENOMEM;
}

static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
{
        struct ctl_table *table;

        table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
}

static struct ctl_table_header *lowpan_ctl_header;

static int lowpan_frags_sysctl_register(void)
{
        lowpan_ctl_header = register_net_sysctl(&init_net,
                                                "net/ieee802154/6lowpan",
                                                lowpan_frags_ctl_table);
        return lowpan_ctl_header == NULL ? -ENOMEM : 0;
}

static void lowpan_frags_sysctl_unregister(void)
{
        unregister_net_sysctl_table(lowpan_ctl_header);
}
#else
static inline int lowpan_frags_ns_sysctl_register(struct net *net)
{
        return 0;
}

static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int lowpan_frags_sysctl_register(void)
{
        return 0;
}

static inline void lowpan_frags_sysctl_unregister(void)
{
}
#endif

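/* Per-namespace setup: memory thresholds and the reassembly timeout default
 * to the IPv6 fragment values, and any datagram size up to 0xFFFF octets is
 * accepted.
 */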
static int __net_init lowpan_frags_init_net(struct net *net)
{
        net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
        net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
        net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
        net->ieee802154_lowpan.max_dsize = 0xFFFF;

        inet_frags_init_net(&net->ieee802154_lowpan.frags);

        return lowpan_frags_ns_sysctl_register(net);
}

static void __net_exit lowpan_frags_exit_net(struct net *net)
{
        lowpan_frags_ns_sysctl_unregister(net);
        inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags);
}

static struct pernet_operations lowpan_frags_ops = {
        .init = lowpan_frags_init_net,
        .exit = lowpan_frags_exit_net,
};

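/* Module init/exit: register the sysctls and pernet hooks, then wire the
 * lowpan_frags callbacks into the generic inet_frag infrastructure.
 */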
int __init lowpan_net_frag_init(void)
{
        int ret;

        ret = lowpan_frags_sysctl_register();
        if (ret)
                return ret;

        ret = register_pernet_subsys(&lowpan_frags_ops);
        if (ret)
                goto err_pernet;

        lowpan_frags.hashfn = lowpan_hashfn;
        lowpan_frags.constructor = lowpan_frag_init;
        lowpan_frags.destructor = NULL;
        lowpan_frags.skb_free = NULL;
        lowpan_frags.qsize = sizeof(struct frag_queue);
        lowpan_frags.match = lowpan_frag_match;
        lowpan_frags.frag_expire = lowpan_frag_expire;
        lowpan_frags.secret_interval = 10 * 60 * HZ;
        inet_frags_init(&lowpan_frags);

        return ret;
err_pernet:
        lowpan_frags_sysctl_unregister();
        return ret;
}

void lowpan_net_frag_exit(void)
{
        inet_frags_fini(&lowpan_frags);
        lowpan_frags_sysctl_unregister();
        unregister_pernet_subsys(&lowpan_frags_ops);
}