mirror_ubuntu-bionic-kernel.git: net/ipv6/ip6mr.c
ip6mr: Fix potential Spectre v1 vulnerability
/*
 * Linux IPv6 multicast routing support for BSD pim6sd
 * Based on net/ipv4/ipmr.c.
 *
 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *     LSIIT Laboratory, Strasbourg, France
 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *     6WIND, Paris, France
 * Copyright (C)2007,2008 USAGI/WIDE Project
 *     YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
#include <linux/nospec.h>
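/* <linux/nospec.h> provides array_index_nospec(), used below in the
 * SIOCGETMIFCNT_IN6 ioctl handlers to clamp a user-supplied MIF index
 * after its bounds check, so that a mispredicted branch cannot
 * speculatively index vif6_table out of range (the Spectre v1 fix
 * this change introduces).
 */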

struct mr6_table {
	struct list_head list;
	possible_net_t net;
	u32 id;
	struct sock *mroute6_sk;
	struct timer_list ipmr_expire_timer;
	struct list_head mfc6_unres_queue;
	struct list_head mfc6_cache_array[MFC6_LINES];
	struct mif_device vif6_table[MAXMIFS];
	int maxvif;
	atomic_t cache_resolve_queue_len;
	bool mroute_do_assert;
	bool mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule common;
};

struct ip6mr_result {
	struct mr6_table *mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
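/* A MIF slot is in use iff its dev pointer is non-NULL. mif6_delete()
 * clears ->dev under write_lock_bh(&mrt_lock), so readers test this
 * macro with mrt_lock held for reading.
 */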

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
   entries is changed only in process context and is protected
   by the weak lock mrt_lock. The queue of unresolved entries is
   protected by the strong spinlock mfc_unres_lock.

   This way the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
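/* fib_rules_lookup() walks the IPv6 mroute rules; the matching rule's
 * ->action callback (ip6mr_rule_action() below) resolves the rule's
 * table id to a table and stores it in res.mrt.
 */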

static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family = RTNL_FAMILY_IP6MR,
	.rule_size = sizeof(struct ip6mr_rule),
	.addr_size = sizeof(struct in6_addr),
	.action = ip6mr_rule_action,
	.match = ip6mr_rule_match,
	.configure = ip6mr_rule_configure,
	.compare = ip6mr_rule_compare,
	.fill = ip6mr_rule_fill,
	.nlgroup = RTNLGRP_IPV6_RULE,
	.policy = ip6mr_rule_policy,
	.owner = THIS_MODULE,
};

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}
#endif

static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}

static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}

#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};


static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr6_table *mrt = it->mrt;
	struct mfc6_cache *mfc;

	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		list_for_each_entry(mfc, it->cache, list)
			if (pos-- == 0)
				return mfc;
	}
	read_unlock(&mrt_lock);

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc6_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

/*
 *	The /proc interfaces to the multicast routing tables:
 *	/proc/net/ip6_mr_cache and /proc/net/ip6_mr_vif
 */

struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	struct mr6_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!MIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr6_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next = ip6mr_vif_seq_next,
	.stop = ip6mr_vif_seq_stop,
	.show = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ip6mr_vif_fops = {
	.owner = THIS_MODULE,
	.open = ip6mr_vif_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr6_table *mrt = it->mrt;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc6_cache, list);

	if (it->cache == &mrt->mfc6_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);

	while (++it->ct < MFC6_LINES) {
		it->cache = &mrt->mfc6_cache_array[it->ct];
		if (list_empty(it->cache))
			continue;
		return list_first_entry(it->cache, struct mfc6_cache, list);
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mrt->mfc6_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc6_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr6_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc6_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc6_cache_array[it->ct])
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group "
			 "Origin "
			 "Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		struct mr6_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mrt->mfc6_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next = ipmr_mfc_seq_next,
	.stop = ipmr_mfc_seq_stop,
	.show = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner = THIS_MODULE,
	.open = ipmr_mfc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.flowi6_mark = skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}

static const struct inet6_protocol pim6_protocol = {
	.handler = pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark = skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit = reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type = ARPHRD_PIMREG;
	dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags = IFF_NOARP;
	dev->netdev_ops = &reg_vif_netdev_ops;
	dev->needs_free_netdev = true;
	dev->features |= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}


/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr6_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
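/* Each pass removes entries whose 10 s lifetime has elapsed and re-arms
 * the timer for the soonest remaining expiry, so the timer only runs
 * while the unresolved queue is non-empty.
 */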

static void ipmr_expire_process(struct timer_list *t)
{
	struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc6_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (MIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
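/* A ttls[] value of 255 means "do not forward on this MIF"; minvif and
 * maxvif bracket the live slots so that the forwarding loops in
 * ip6_mr_forward() and __ip6mr_fill_mroute() can skip the rest.
 */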

static int mif6_add(struct net *net, struct mr6_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			return c;
	}
	return NULL;
}
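/* MFC6_HASH() (declared with MFC6_LINES in <linux/mroute6.h>) hashes
 * the (group, origin) pair to one of the MFC6_LINES buckets; callers
 * walk the bucket list under mrt_lock or RTNL.
 */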

/* Look for a (*,*,oif) entry */
static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
						      mifi_t mifi)
{
	int line = MFC6_HASH(&in6addr_any, &in6addr_any);
	struct mfc6_cache *c;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_any(&c->mf6c_mcastgrp) &&
		    (c->mfc_un.res.ttls[mifi] < 255))
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	int line = MFC6_HASH(mcastgrp, &in6addr_any);
	struct mfc6_cache *c, *proxy;

	if (ipv6_addr_any(mcastgrp))
		goto skip;

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
		if (ipv6_addr_any(&c->mf6c_origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
			if (c->mfc_un.res.ttls[mifi] < 255)
				return c;

			/* It's ok if the mifi is part of the static tree */
			proxy = ip6mr_cache_find_any_parent(mrt,
							    c->mf6c_parent);
			if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
				return c;
		}

skip:
	return ip6mr_cache_find_any_parent(mrt, mifi);
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->mfc_un.res.minvif = MAXMIFS;
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
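/* Unresolved entries are allocated with GFP_ATOMIC because they are
 * created from the packet receive path; each one starts the 10 second
 * countdown that ipmr_do_expire_process() enforces.
 */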

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (!mrt->mroute6_sk) {
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. A new unresolved cache entry is
 *	created if needed; the whole operation runs under mfc_unres_lock.
 */

static int
ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
{
	bool found = false;
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc6_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
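/* Resource limits: a table keeps at most 10 unresolved entries at a
 * time, and each entry buffers at most 4 pending skbs; beyond either
 * limit the packet is dropped and -ENOBUFS returned.
 */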

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	int line;
	struct mfc6_cache *c, *next;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == c->mf6c_parent)) {
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr6_table *mrt;
	struct mif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif6_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
		      ip6mr_rtm_dumproute, 0);
	return 0;
#ifdef CONFIG_IPV6_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	bool found = false;
	int line;
	struct mfc6_cache *uc, *c;
	unsigned char ttls[MAXMIFS];
	int i;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;

	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp,
				    &mfc->mf6cc_mcastgrp.sin6_addr) &&
		    (parent == -1 || parent == mfc->mf6cc_parent)) {
			found = true;
			break;
		}
	}

	if (found) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	list_add(&c->list, &mrt->mfc6_cache_array[line]);
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc6_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr6_table *mrt, bool all)
{
	int i;
	LIST_HEAD(list);
	struct mfc6_cache *c, *next;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
			continue;
		mif6_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
			if (!all && (c->mfc_flags & MFC_STATIC))
				continue;
			write_lock_bh(&mrt_lock);
			list_del(&c->list);
			write_unlock_bh(&mrt_lock);

			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
			list_del(&c->list);
			mr6_netlink_event(mrt, c, RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(mrt->mroute6_sk == NULL)) {
		mrt->mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	} else {
		err = -EADDRINUSE;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == mrt->mroute6_sk) {
			write_lock_bh(&mrt_lock);
			mrt->mroute6_sk = NULL;
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, false);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}

struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
{
	struct mr6_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif = skb->dev->ifindex,
		.flowi6_mark = skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return NULL;

	return mrt->mroute6_sk;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk == mrt->mroute6_sk, parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}

#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == mrt->mroute6_sk)
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		if (!ip6mr_new_table(net, v))
			ret = -ENOMEM;
		else
			raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
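		/* vr.mifi was bounds-checked above, but the check can be
		 * bypassed under speculative execution; clamp the index
		 * before using it to address vif6_table (Spectre v1).
		 */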
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr6_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
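		/* Same Spectre v1 sanitization as in ip6mr_ioctl():
		 * clamp the user-supplied index after the bounds check.
		 */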
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif6_table[vr.mifi];
		if (MIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct mif_device *vif = &mrt->vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if the mrouter runs a multicast
	 * program, that program should receive packets regardless of the
	 * interface it joined on. If we did not do this, the program would
	 * have to join on all interfaces. On the other hand, a multihoming
	 * host (or a router, but not an mrouter) cannot join on more than
	 * one interface - it would result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}
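/* ip6mr_find_vif() returns the MIF index whose device matches, or -1
 * when the device is not a configured MIF (the loop underflows to -1).
 */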
2103
2104 static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2105 struct sk_buff *skb, struct mfc6_cache *cache)
2106 {
2107 int psend = -1;
2108 int vif, ct;
2109 int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2110
2111 vif = cache->mf6c_parent;
2112 cache->mfc_un.res.pkt++;
2113 cache->mfc_un.res.bytes += skb->len;
2114 cache->mfc_un.res.lastuse = jiffies;
2115
2116 if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2117 struct mfc6_cache *cache_proxy;
2118
2119 /* For an (*,G) entry, we only check that the incoming
2120 * interface is part of the static tree.
2121 */
2122 cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2123 if (cache_proxy &&
2124 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2125 goto forward;
2126 }
2127
2128 /*
2129 * Wrong interface: drop packet and (maybe) send PIM assert.
2130 */
2131 if (mrt->vif6_table[vif].dev != skb->dev) {
2132 cache->mfc_un.res.wrong_if++;
2133
2134 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2135 /* pimsm uses asserts, when switching from RPT to SPT,
2136 so that we cannot check that packet arrived on an oif.
2137 It is bad, but otherwise we would need to move pretty
2138 large chunk of pimd to kernel. Ough... --ANK
2139 */
2140 (mrt->mroute_do_pim ||
2141 cache->mfc_un.res.ttls[true_vifi] < 255) &&
2142 time_after(jiffies,
2143 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2144 cache->mfc_un.res.last_assert = jiffies;
2145 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2146 }
2147 goto dont_forward;
2148 }
2149
2150 forward:
2151 mrt->vif6_table[vif].pkt_in++;
2152 mrt->vif6_table[vif].bytes_in += skb->len;
2153
2154 /*
2155 * Forward the frame
2156 */
2157 if (ipv6_addr_any(&cache->mf6c_origin) &&
2158 ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2159 if (true_vifi >= 0 &&
2160 true_vifi != cache->mf6c_parent &&
2161 ipv6_hdr(skb)->hop_limit >
2162 cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2163 /* It's an (*,*) entry and the packet is not coming from
2164 * the upstream: forward the packet to the upstream
2165 * only.
2166 */
2167 psend = cache->mf6c_parent;
2168 goto last_forward;
2169 }
2170 goto dont_forward;
2171 }
2172 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
2173 /* For a (*,G) entry, don't forward back to the incoming interface */
2174 if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2175 ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
2176 if (psend != -1) {
2177 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2178 if (skb2)
2179 ip6mr_forward2(net, mrt, skb2, cache, psend);
2180 }
2181 psend = ct;
2182 }
2183 }
2184 last_forward:
2185 if (psend != -1) {
2186 ip6mr_forward2(net, mrt, skb, cache, psend);
2187 return;
2188 }
2189
2190 dont_forward:
2191 kfree_skb(skb);
2192 }
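/* The oif loop above implements hop-limit scoping: a packet is copied to
 * mif ct only when its hop limit exceeds the threshold installed for that
 * mif (mfc_un.res.ttls[ct], set via MRT6_ADD_MFC), and each transmission
 * is deferred by one match so the final match can consume the original
 * skb instead of a clone. A standalone sketch of the threshold rule with
 * toy values:
 */
#include <stdio.h>

int main(void)
{
        /* per-mif thresholds, as in ttls[]; 255 means "never forward" */
        unsigned char ttls[4] = { 1, 64, 255, 2 };
        unsigned char hop_limit = 16;   /* from the incoming IPv6 header */
        int ct;

        for (ct = 3; ct >= 0; ct--)     /* same direction as the kernel loop */
                if (hop_limit > ttls[ct])
                        printf("would forward on mif %d\n", ct);
        return 0;
}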
2193
2194
2195 /*
2196 * Multicast packets for forwarding arrive here
2197 */
2198
2199 int ip6_mr_input(struct sk_buff *skb)
2200 {
2201 struct mfc6_cache *cache;
2202 struct net *net = dev_net(skb->dev);
2203 struct mr6_table *mrt;
2204 struct flowi6 fl6 = {
2205 .flowi6_iif = skb->dev->ifindex,
2206 .flowi6_mark = skb->mark,
2207 };
2208 int err;
2209
2210 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2211 if (err < 0) {
2212 kfree_skb(skb);
2213 return err;
2214 }
2215
2216 read_lock(&mrt_lock);
2217 cache = ip6mr_cache_find(mrt,
2218 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2219 if (!cache) {
2220 int vif = ip6mr_find_vif(mrt, skb->dev);
2221
2222 if (vif >= 0)
2223 cache = ip6mr_cache_find_any(mrt,
2224 &ipv6_hdr(skb)->daddr,
2225 vif);
2226 }
2227
2228 /*
2229 * No usable cache entry
2230 */
2231 if (!cache) {
2232 int vif;
2233
2234 vif = ip6mr_find_vif(mrt, skb->dev);
2235 if (vif >= 0) {
2236 int err = ip6mr_cache_unresolved(mrt, vif, skb);
2237 read_unlock(&mrt_lock);
2238
2239 return err;
2240 }
2241 read_unlock(&mrt_lock);
2242 kfree_skb(skb);
2243 return -ENODEV;
2244 }
2245
2246 ip6_mr_forward(net, mrt, skb, cache);
2247
2248 read_unlock(&mrt_lock);
2249
2250 return 0;
2251 }
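/* Sketch of the daemon side of the unresolved path above: the socket that
 * did MRT6_INIT receives an MRT6MSG_NOCACHE upcall (generated via
 * ip6mr_cache_unresolved()) for each flow with no MFC entry. Assumptions:
 * CAP_NET_ADMIN, error handling elided; the upcall payload begins with
 * struct mrt6msg, and im6_mbz == 0 is what tells upcalls apart from real
 * ICMPv6 packets arriving on the same raw socket.
 */
#include <stdio.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

int main(void)
{
        int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
        int on = 1;
        unsigned char buf[8192];
        char src[INET6_ADDRSTRLEN], dst[INET6_ADDRSTRLEN];

        /* become the mroute6 socket; the kernel now sends us upcalls */
        setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &on, sizeof(on));

        for (;;) {
                ssize_t n = read(s, buf, sizeof(buf));
                struct mrt6msg *msg = (struct mrt6msg *)buf;

                if (n < (ssize_t)sizeof(*msg) || msg->im6_mbz != 0)
                        continue;       /* ordinary ICMPv6, not an upcall */

                if (msg->im6_msgtype == MRT6MSG_NOCACHE) {
                        inet_ntop(AF_INET6, &msg->im6_src, src, sizeof(src));
                        inet_ntop(AF_INET6, &msg->im6_dst, dst, sizeof(dst));
                        printf("no MFC entry for (%s,%s) on mif %u\n",
                               src, dst, msg->im6_mif);
                        /* a real daemon would resolve and MRT6_ADD_MFC here */
                }
        }
}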
2252
2253
2254 static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2255 struct mfc6_cache *c, struct rtmsg *rtm)
2256 {
2257 struct rta_mfc_stats mfcs;
2258 struct nlattr *mp_attr;
2259 struct rtnexthop *nhp;
2260 unsigned long lastuse;
2261 int ct;
2262
2263 /* If cache is unresolved, don't try to parse IIF and OIF */
2264 if (c->mf6c_parent >= MAXMIFS) {
2265 rtm->rtm_flags |= RTNH_F_UNRESOLVED;
2266 return -ENOENT;
2267 }
2268
2269 if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2270 nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2271 return -EMSGSIZE;
2272 mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
2273 if (!mp_attr)
2274 return -EMSGSIZE;
2275
2276 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
2277 if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
2278 nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
2279 if (!nhp) {
2280 nla_nest_cancel(skb, mp_attr);
2281 return -EMSGSIZE;
2282 }
2283
2284 nhp->rtnh_flags = 0;
2285 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
2286 nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
2287 nhp->rtnh_len = sizeof(*nhp);
2288 }
2289 }
2290
2291 nla_nest_end(skb, mp_attr);
2292
2293 lastuse = READ_ONCE(c->mfc_un.res.lastuse);
2294 lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
2295
2296 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2297 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2298 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
2299 if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
2300 nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
2301 RTA_PAD))
2302 return -EMSGSIZE;
2303
2304 rtm->rtm_type = RTN_MULTICAST;
2305 return 1;
2306 }
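/* The RTA_MULTIPATH attribute built above is a packed array of
 * struct rtnexthop, one per forwarding mif, with the TTL threshold in
 * rtnh_hops. A hedged sketch of the walk a dump consumer performs over
 * RTA_DATA(attr) - here against a hand-built toy buffer so it runs
 * standalone:
 */
#include <stdio.h>
#include <string.h>
#include <linux/rtnetlink.h>

int main(void)
{
        unsigned char buf[2 * sizeof(struct rtnexthop)];
        struct rtnexthop *nhp = (struct rtnexthop *)buf;
        int len = sizeof(buf);

        memset(buf, 0, sizeof(buf));
        nhp->rtnh_len = sizeof(*nhp);
        nhp->rtnh_ifindex = 2;                  /* oif */
        nhp->rtnh_hops = 1;                     /* ttls[ct] */
        RTNH_NEXT(nhp)->rtnh_len = sizeof(*nhp);
        RTNH_NEXT(nhp)->rtnh_ifindex = 3;
        RTNH_NEXT(nhp)->rtnh_hops = 64;

        for (; RTNH_OK(nhp, len);
             len -= RTNH_ALIGN(nhp->rtnh_len), nhp = RTNH_NEXT(nhp))
                printf("oif %d, threshold %u\n",
                       nhp->rtnh_ifindex, nhp->rtnh_hops);
        return 0;
}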
2307
2308 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2309 u32 portid)
2310 {
2311 int err;
2312 struct mr6_table *mrt;
2313 struct mfc6_cache *cache;
2314 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2315
2316 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2317 if (!mrt)
2318 return -ENOENT;
2319
2320 read_lock(&mrt_lock);
2321 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2322 if (!cache && skb->dev) {
2323 int vif = ip6mr_find_vif(mrt, skb->dev);
2324
2325 if (vif >= 0)
2326 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2327 vif);
2328 }
2329
2330 if (!cache) {
2331 struct sk_buff *skb2;
2332 struct ipv6hdr *iph;
2333 struct net_device *dev;
2334 int vif;
2335
2336 dev = skb->dev;
2337 if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2338 read_unlock(&mrt_lock);
2339 return -ENODEV;
2340 }
2341
2342 /* really correct? */
2343 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2344 if (!skb2) {
2345 read_unlock(&mrt_lock);
2346 return -ENOMEM;
2347 }
2348
2349 NETLINK_CB(skb2).portid = portid;
2350 skb_reset_transport_header(skb2);
2351
2352 skb_put(skb2, sizeof(struct ipv6hdr));
2353 skb_reset_network_header(skb2);
2354
2355 iph = ipv6_hdr(skb2);
2356 iph->version = 0;
2357 iph->priority = 0;
2358 iph->flow_lbl[0] = 0;
2359 iph->flow_lbl[1] = 0;
2360 iph->flow_lbl[2] = 0;
2361 iph->payload_len = 0;
2362 iph->nexthdr = IPPROTO_NONE;
2363 iph->hop_limit = 0;
2364 iph->saddr = rt->rt6i_src.addr;
2365 iph->daddr = rt->rt6i_dst.addr;
2366
2367 err = ip6mr_cache_unresolved(mrt, vif, skb2);
2368 read_unlock(&mrt_lock);
2369
2370 return err;
2371 }
2372
2373 if (rtm->rtm_flags & RTM_F_NOTIFY)
2374 cache->mfc_flags |= MFC_NOTIFY;
2375
2376 err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
2377 read_unlock(&mrt_lock);
2378 return err;
2379 }
2380
2381 static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2382 u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2383 int flags)
2384 {
2385 struct nlmsghdr *nlh;
2386 struct rtmsg *rtm;
2387 int err;
2388
2389 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2390 if (!nlh)
2391 return -EMSGSIZE;
2392
2393 rtm = nlmsg_data(nlh);
2394 rtm->rtm_family = RTNL_FAMILY_IP6MR;
2395 rtm->rtm_dst_len = 128;
2396 rtm->rtm_src_len = 128;
2397 rtm->rtm_tos = 0;
2398 rtm->rtm_table = mrt->id;
2399 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2400 goto nla_put_failure;
2401 rtm->rtm_type = RTN_MULTICAST;
2402 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2403 if (c->mfc_flags & MFC_STATIC)
2404 rtm->rtm_protocol = RTPROT_STATIC;
2405 else
2406 rtm->rtm_protocol = RTPROT_MROUTED;
2407 rtm->rtm_flags = 0;
2408
2409 if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2410 nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2411 goto nla_put_failure;
2412 err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2413 /* do not break the dump if cache is unresolved */
2414 if (err < 0 && err != -ENOENT)
2415 goto nla_put_failure;
2416
2417 nlmsg_end(skb, nlh);
2418 return 0;
2419
2420 nla_put_failure:
2421 nlmsg_cancel(skb, nlh);
2422 return -EMSGSIZE;
2423 }
2424
2425 static int mr6_msgsize(bool unresolved, int maxvif)
2426 {
2427 size_t len =
2428 NLMSG_ALIGN(sizeof(struct rtmsg))
2429 + nla_total_size(4) /* RTA_TABLE */
2430 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
2431 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
2432 ;
2433
2434 if (!unresolved)
2435 len = len
2436 + nla_total_size(4) /* RTA_IIF */
2437 + nla_total_size(0) /* RTA_MULTIPATH */
2438 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2439 /* RTA_MFC_STATS */
2440 + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2441 ;
2442
2443 return len;
2444 }
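/* Worked example of the sizing above, assuming a 64-bit arch with
 * efficient unaligned access (so the 64-bit stats attribute needs no
 * extra padding attribute). An nla header is 4 bytes, payloads are padded
 * to 4-byte multiples, and sizeof(struct rtmsg) is 12:
 *
 *   unresolved:           12 + 8 + 20 + 20          =  60 bytes
 *   resolved, maxvif = 8: 60 + 8 + 4 + 8 * 8 + 28   = 164 bytes
 *
 * where RTA_TABLE/RTA_IIF are u32 (4 + 4 = 8 each), RTA_SRC/RTA_DST carry
 * an in6_addr (4 + 16 = 20 each), the empty RTA_MULTIPATH header is 4,
 * each rtnexthop is 8, and struct rta_mfc_stats is three u64s
 * (4 + 24 = 28).
 */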
2445
2446 static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2447 int cmd)
2448 {
2449 struct net *net = read_pnet(&mrt->net);
2450 struct sk_buff *skb;
2451 int err = -ENOBUFS;
2452
2453 skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2454 GFP_ATOMIC);
2455 if (!skb)
2456 goto errout;
2457
2458 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2459 if (err < 0)
2460 goto errout;
2461
2462 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2463 return;
2464
2465 errout:
2466 kfree_skb(skb);
2467 if (err < 0)
2468 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2469 }
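/* Hedged sketch of a consumer for these notifications: join the
 * RTNLGRP_IPV6_MROUTE group on an rtnetlink socket and watch for
 * RTM_NEWROUTE/RTM_DELROUTE messages whose rtm_family is
 * RTNL_FAMILY_IP6MR. (The cache reports sent by mrt6msg_netlink_event()
 * below can be received the same way via RTNLGRP_IPV6_MROUTE_R.)
 * Error handling elided.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        int grp = RTNLGRP_IPV6_MROUTE;
        char buf[8192];

        setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                   &grp, sizeof(grp));

        for (;;) {
                ssize_t n = recv(fd, buf, sizeof(buf), 0);
                struct nlmsghdr *nlh;

                for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, n);
                     nlh = NLMSG_NEXT(nlh, n)) {
                        struct rtmsg *rtm = NLMSG_DATA(nlh);

                        if (rtm->rtm_family == RTNL_FAMILY_IP6MR)
                                printf("MFC entry %s\n",
                                       nlh->nlmsg_type == RTM_NEWROUTE ?
                                       "added" : "deleted");
                }
        }
}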
2470
2471 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2472 {
2473 size_t len =
2474 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2475 + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
2476 + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
2477 /* IP6MRA_CREPORT_SRC_ADDR */
2478 + nla_total_size(sizeof(struct in6_addr))
2479 /* IP6MRA_CREPORT_DST_ADDR */
2480 + nla_total_size(sizeof(struct in6_addr))
2481 /* IP6MRA_CREPORT_PKT */
2482 + nla_total_size(payloadlen)
2483 ;
2484
2485 return len;
2486 }
2487
2488 static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
2489 {
2490 struct net *net = read_pnet(&mrt->net);
2491 struct nlmsghdr *nlh;
2492 struct rtgenmsg *rtgenm;
2493 struct mrt6msg *msg;
2494 struct sk_buff *skb;
2495 struct nlattr *nla;
2496 int payloadlen;
2497
2498 payloadlen = pkt->len - sizeof(struct mrt6msg);
2499 msg = (struct mrt6msg *)skb_transport_header(pkt);
2500
2501 skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2502 if (!skb)
2503 goto errout;
2504
2505 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2506 sizeof(struct rtgenmsg), 0);
2507 if (!nlh)
2508 goto errout;
2509 rtgenm = nlmsg_data(nlh);
2510 rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2511 if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2512 nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2513 nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2514 &msg->im6_src) ||
2515 nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2516 &msg->im6_dst))
2517 goto nla_put_failure;
2518
2519 nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2520 if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2521 nla_data(nla), payloadlen))
2522 goto nla_put_failure;
2523
2524 nlmsg_end(skb, nlh);
2525
2526 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2527 return;
2528
2529 nla_put_failure:
2530 nlmsg_cancel(skb, nlh);
2531 errout:
2532 kfree_skb(skb);
2533 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2534 }
2535
2536 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2537 {
2538 struct net *net = sock_net(skb->sk);
2539 struct mr6_table *mrt;
2540 struct mfc6_cache *mfc;
2541 unsigned int t = 0, s_t;
2542 unsigned int h = 0, s_h;
2543 unsigned int e = 0, s_e;
2544
2545 s_t = cb->args[0];
2546 s_h = cb->args[1];
2547 s_e = cb->args[2];
2548
2549 read_lock(&mrt_lock);
2550 ip6mr_for_each_table(mrt, net) {
2551 if (t < s_t)
2552 goto next_table;
2553 if (t > s_t)
2554 s_h = 0;
2555 for (h = s_h; h < MFC6_LINES; h++) {
2556 list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2557 if (e < s_e)
2558 goto next_entry;
2559 if (ip6mr_fill_mroute(mrt, skb,
2560 NETLINK_CB(cb->skb).portid,
2561 cb->nlh->nlmsg_seq,
2562 mfc, RTM_NEWROUTE,
2563 NLM_F_MULTI) < 0)
2564 goto done;
2565 next_entry:
2566 e++;
2567 }
2568 e = s_e = 0;
2569 }
2570 spin_lock_bh(&mfc_unres_lock);
2571 list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2572 if (e < s_e)
2573 goto next_entry2;
2574 if (ip6mr_fill_mroute(mrt, skb,
2575 NETLINK_CB(cb->skb).portid,
2576 cb->nlh->nlmsg_seq,
2577 mfc, RTM_NEWROUTE,
2578 NLM_F_MULTI) < 0) {
2579 spin_unlock_bh(&mfc_unres_lock);
2580 goto done;
2581 }
2582 next_entry2:
2583 e++;
2584 }
2585 spin_unlock_bh(&mfc_unres_lock);
2586 e = s_e = 0;
2587 s_h = 0;
2588 next_table:
2589 t++;
2590 }
2591 done:
2592 read_unlock(&mrt_lock);
2593
2594 cb->args[2] = e;
2595 cb->args[1] = h;
2596 cb->args[0] = t;
2597
2598 return skb->len;
2599 }
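/* Request side of this dump, sketched for userspace (error handling and
 * the full receive loop elided). rtm_family RTNL_FAMILY_IP6MR selects the
 * ip6mr tables; the kernel answers with one RTM_NEWROUTE per MFC entry,
 * resuming from cb->args[] between recv()s on large dumps.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
        struct {
                struct nlmsghdr nlh;
                struct rtmsg rtm;
        } req;

        memset(&req, 0, sizeof(req));
        req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
        req.nlh.nlmsg_type = RTM_GETROUTE;
        req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
        req.rtm.rtm_family = RTNL_FAMILY_IP6MR;

        send(fd, &req, req.nlh.nlmsg_len, 0);
        /* ... recv() in a loop, walking messages with NLMSG_OK/NLMSG_NEXT
         * and each RTA_MULTIPATH with the rtnexthop walk shown earlier,
         * until NLMSG_DONE ... */
        close(fd);
        return 0;
}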