ip6mr: Fix potential Spectre v1 vulnerability
7bc570c8
YH
1/*
2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
4 *
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
8 * 6WIND, Paris, France
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 */
18
7c0f6ba6 19#include <linux/uaccess.h>
7bc570c8
YH
20#include <linux/types.h>
21#include <linux/sched.h>
22#include <linux/errno.h>
23#include <linux/timer.h>
24#include <linux/mm.h>
25#include <linux/kernel.h>
26#include <linux/fcntl.h>
27#include <linux/stat.h>
28#include <linux/socket.h>
7bc570c8
YH
29#include <linux/inet.h>
30#include <linux/netdevice.h>
31#include <linux/inetdevice.h>
7bc570c8
YH
32#include <linux/proc_fs.h>
33#include <linux/seq_file.h>
7bc570c8 34#include <linux/init.h>
5a0e3ad6 35#include <linux/slab.h>
e2d57766 36#include <linux/compat.h>
7bc570c8
YH
37#include <net/protocol.h>
38#include <linux/skbuff.h>
39#include <net/sock.h>
7bc570c8 40#include <net/raw.h>
7bc570c8
YH
41#include <linux/notifier.h>
42#include <linux/if_arp.h>
7bc570c8
YH
43#include <net/checksum.h>
44#include <net/netlink.h>
d1db275d 45#include <net/fib_rules.h>
7bc570c8
YH
46
47#include <net/ipv6.h>
48#include <net/ip6_route.h>
49#include <linux/mroute6.h>
14fb64e1 50#include <linux/pim.h>
7bc570c8
YH
51#include <net/addrconf.h>
52#include <linux/netfilter_ipv6.h>
bc3b2d7f 53#include <linux/export.h>
5d6e430d 54#include <net/ip6_checksum.h>
d67b8c61 55#include <linux/netconf.h>
215c81c4 56#include <linux/nospec.h>
7bc570c8 57
6bd52143 58struct mr6_table {
d1db275d 59 struct list_head list;
0c5c9fb5 60 possible_net_t net;
d1db275d 61 u32 id;
6bd52143
PM
62 struct sock *mroute6_sk;
63 struct timer_list ipmr_expire_timer;
64 struct list_head mfc6_unres_queue;
65 struct list_head mfc6_cache_array[MFC6_LINES];
66 struct mif_device vif6_table[MAXMIFS];
67 int maxvif;
68 atomic_t cache_resolve_queue_len;
53d6841d
JP
69 bool mroute_do_assert;
70 bool mroute_do_pim;
6bd52143
PM
71#ifdef CONFIG_IPV6_PIMSM_V2
72 int mroute_reg_vif_num;
73#endif
74};
75
d1db275d
PM
76struct ip6mr_rule {
77 struct fib_rule common;
78};
79
80struct ip6mr_result {
81 struct mr6_table *mrt;
82};
83
7bc570c8
YH
84/* Big lock, protecting vif table, mrt cache and mroute socket state.
85 Note that the changes are semaphored via rtnl_lock.
86 */
87
88static DEFINE_RWLOCK(mrt_lock);
89
90/*
91 * Multicast router control variables
92 */
93
6bd52143 94#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)
7bc570c8 95
7bc570c8
YH
96/* Special spinlock for queue of unresolved entries */
97static DEFINE_SPINLOCK(mfc_unres_lock);
98
99/* We return to original Alan's scheme. Hash table of resolved
100 entries is changed only in process context and protected
101 with weak lock mrt_lock. Queue of unresolved entries is protected
102 with strong spinlock mfc_unres_lock.
103
104 In this case data path is free of exclusive locks at all.
105 */
106
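The locking split described above is visible throughout the rest of the file: resolved entries are looked up and used under the mrt_lock read lock only, and the unresolved queue is the one place that needs the mfc_unres_lock spinlock. A simplified sketch of the read-side pattern on the data path (illustrative only; the real code is ip6_mr_input(), later in this file, and mifi here is assumed to be the already-determined incoming mif index):

	struct mfc6_cache *cache;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (cache)
		/* resolved: replicate to the outgoing mifs, no write lock needed */
		ip6_mr_forward(net, mrt, skb, cache);
	else
		/* unresolved: queue the skb and report to the daemon; this path
		 * takes mfc_unres_lock inside ip6mr_cache_unresolved() */
		ip6mr_cache_unresolved(mrt, mifi, skb);
	read_unlock(&mrt_lock);
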
107static struct kmem_cache *mrt_cachep __read_mostly;
108
d1db275d
PM
109static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
110static void ip6mr_free_table(struct mr6_table *mrt);
111
2b52c3ad
RR
112static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
113 struct sk_buff *skb, struct mfc6_cache *cache);
6bd52143 114static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
8229efda 115 mifi_t mifi, int assert);
5b285cac
PM
116static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
117 struct mfc6_cache *c, struct rtmsg *rtm);
812e44dd
ND
118static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
119 int cmd);
dd12d15c 120static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
5b285cac
PM
121static int ip6mr_rtm_dumproute(struct sk_buff *skb,
122 struct netlink_callback *cb);
4c698046 123static void mroute_clean_tables(struct mr6_table *mrt, bool all);
e99e88a9 124static void ipmr_expire_process(struct timer_list *t);
d1db275d
PM
125
126#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
8ffb335e 127#define ip6mr_for_each_table(mrt, net) \
d1db275d
PM
128 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
129
130static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
131{
132 struct mr6_table *mrt;
133
134 ip6mr_for_each_table(mrt, net) {
135 if (mrt->id == id)
136 return mrt;
137 }
138 return NULL;
139}
140
4c9483b2 141static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
d1db275d
PM
142 struct mr6_table **mrt)
143{
d1db275d 144 int err;
95f4a45d
HFS
145 struct ip6mr_result res;
146 struct fib_lookup_arg arg = {
147 .result = &res,
148 .flags = FIB_LOOKUP_NOREF,
149 };
d1db275d 150
4c9483b2
DM
151 err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
152 flowi6_to_flowi(flp6), 0, &arg);
d1db275d
PM
153 if (err < 0)
154 return err;
155 *mrt = res.mrt;
156 return 0;
157}
158
159static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
160 int flags, struct fib_lookup_arg *arg)
161{
162 struct ip6mr_result *res = arg->result;
163 struct mr6_table *mrt;
164
165 switch (rule->action) {
166 case FR_ACT_TO_TBL:
167 break;
168 case FR_ACT_UNREACHABLE:
169 return -ENETUNREACH;
170 case FR_ACT_PROHIBIT:
171 return -EACCES;
172 case FR_ACT_BLACKHOLE:
173 default:
174 return -EINVAL;
175 }
176
177 mrt = ip6mr_get_table(rule->fr_net, rule->table);
63159f29 178 if (!mrt)
d1db275d
PM
179 return -EAGAIN;
180 res->mrt = mrt;
181 return 0;
182}
183
184static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
185{
186 return 1;
187}
188
189static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
190 FRA_GENERIC_POLICY,
191};
192
193static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
194 struct fib_rule_hdr *frh, struct nlattr **tb)
195{
196 return 0;
197}
198
199static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
200 struct nlattr **tb)
201{
202 return 1;
203}
204
205static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
206 struct fib_rule_hdr *frh)
207{
208 frh->dst_len = 0;
209 frh->src_len = 0;
210 frh->tos = 0;
211 return 0;
212}
213
04a6f82c 214static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
d1db275d
PM
215 .family = RTNL_FAMILY_IP6MR,
216 .rule_size = sizeof(struct ip6mr_rule),
217 .addr_size = sizeof(struct in6_addr),
218 .action = ip6mr_rule_action,
219 .match = ip6mr_rule_match,
220 .configure = ip6mr_rule_configure,
221 .compare = ip6mr_rule_compare,
d1db275d
PM
222 .fill = ip6mr_rule_fill,
223 .nlgroup = RTNLGRP_IPV6_RULE,
224 .policy = ip6mr_rule_policy,
225 .owner = THIS_MODULE,
226};
227
228static int __net_init ip6mr_rules_init(struct net *net)
229{
230 struct fib_rules_ops *ops;
231 struct mr6_table *mrt;
232 int err;
233
234 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
235 if (IS_ERR(ops))
236 return PTR_ERR(ops);
237
238 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
239
240 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
63159f29 241 if (!mrt) {
d1db275d
PM
242 err = -ENOMEM;
243 goto err1;
244 }
245
246 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
247 if (err < 0)
248 goto err2;
249
250 net->ipv6.mr6_rules_ops = ops;
251 return 0;
252
253err2:
f243e5a7 254 ip6mr_free_table(mrt);
d1db275d
PM
255err1:
256 fib_rules_unregister(ops);
257 return err;
258}
259
260static void __net_exit ip6mr_rules_exit(struct net *net)
261{
262 struct mr6_table *mrt, *next;
263
905a6f96 264 rtnl_lock();
035320d5
ED
265 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
266 list_del(&mrt->list);
d1db275d 267 ip6mr_free_table(mrt);
035320d5 268 }
d1db275d 269 fib_rules_unregister(net->ipv6.mr6_rules_ops);
419df12f 270 rtnl_unlock();
d1db275d
PM
271}
272#else
273#define ip6mr_for_each_table(mrt, net) \
274 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
275
276static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
277{
278 return net->ipv6.mrt6;
279}
280
4c9483b2 281static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
d1db275d
PM
282 struct mr6_table **mrt)
283{
284 *mrt = net->ipv6.mrt6;
285 return 0;
286}
287
288static int __net_init ip6mr_rules_init(struct net *net)
289{
290 net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
291 return net->ipv6.mrt6 ? 0 : -ENOMEM;
292}
293
294static void __net_exit ip6mr_rules_exit(struct net *net)
295{
905a6f96 296 rtnl_lock();
d1db275d 297 ip6mr_free_table(net->ipv6.mrt6);
905a6f96
HFS
298 net->ipv6.mrt6 = NULL;
299 rtnl_unlock();
d1db275d
PM
300}
301#endif
302
303static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
304{
305 struct mr6_table *mrt;
306 unsigned int i;
307
308 mrt = ip6mr_get_table(net, id);
53b24b8f 309 if (mrt)
d1db275d
PM
310 return mrt;
311
312 mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
63159f29 313 if (!mrt)
d1db275d
PM
314 return NULL;
315 mrt->id = id;
316 write_pnet(&mrt->net, net);
317
318 /* Forwarding cache */
319 for (i = 0; i < MFC6_LINES; i++)
320 INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);
321
322 INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
323
e99e88a9 324 timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);
d1db275d
PM
325
326#ifdef CONFIG_IPV6_PIMSM_V2
327 mrt->mroute_reg_vif_num = -1;
328#endif
329#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
330 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
331#endif
332 return mrt;
333}
7bc570c8 334
d1db275d
PM
335static void ip6mr_free_table(struct mr6_table *mrt)
336{
7ba0c47c 337 del_timer_sync(&mrt->ipmr_expire_timer);
4c698046 338 mroute_clean_tables(mrt, true);
d1db275d
PM
339 kfree(mrt);
340}
7bc570c8
YH
341
342#ifdef CONFIG_PROC_FS
343
344struct ipmr_mfc_iter {
8b90fc7e 345 struct seq_net_private p;
d1db275d 346 struct mr6_table *mrt;
f30a7784 347 struct list_head *cache;
7bc570c8
YH
348 int ct;
349};
350
351
8b90fc7e
BT
352static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
353 struct ipmr_mfc_iter *it, loff_t pos)
7bc570c8 354{
d1db275d 355 struct mr6_table *mrt = it->mrt;
7bc570c8
YH
356 struct mfc6_cache *mfc;
357
7bc570c8 358 read_lock(&mrt_lock);
f30a7784 359 for (it->ct = 0; it->ct < MFC6_LINES; it->ct++) {
6bd52143 360 it->cache = &mrt->mfc6_cache_array[it->ct];
f30a7784 361 list_for_each_entry(mfc, it->cache, list)
7bc570c8
YH
362 if (pos-- == 0)
363 return mfc;
f30a7784 364 }
7bc570c8
YH
365 read_unlock(&mrt_lock);
366
7bc570c8 367 spin_lock_bh(&mfc_unres_lock);
6bd52143 368 it->cache = &mrt->mfc6_unres_queue;
f30a7784 369 list_for_each_entry(mfc, it->cache, list)
c476efbc 370 if (pos-- == 0)
7bc570c8
YH
371 return mfc;
372 spin_unlock_bh(&mfc_unres_lock);
373
374 it->cache = NULL;
375 return NULL;
376}
377
7bc570c8
YH
378/*
379 * The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
380 */
381
382struct ipmr_vif_iter {
8b90fc7e 383 struct seq_net_private p;
d1db275d 384 struct mr6_table *mrt;
7bc570c8
YH
385 int ct;
386};
387
8b90fc7e
BT
388static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
389 struct ipmr_vif_iter *iter,
7bc570c8
YH
390 loff_t pos)
391{
d1db275d 392 struct mr6_table *mrt = iter->mrt;
6bd52143
PM
393
394 for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
395 if (!MIF_EXISTS(mrt, iter->ct))
7bc570c8
YH
396 continue;
397 if (pos-- == 0)
6bd52143 398 return &mrt->vif6_table[iter->ct];
7bc570c8
YH
399 }
400 return NULL;
401}
402
403static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
404 __acquires(mrt_lock)
405{
d1db275d 406 struct ipmr_vif_iter *iter = seq->private;
8b90fc7e 407 struct net *net = seq_file_net(seq);
d1db275d
PM
408 struct mr6_table *mrt;
409
410 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
63159f29 411 if (!mrt)
d1db275d
PM
412 return ERR_PTR(-ENOENT);
413
414 iter->mrt = mrt;
8b90fc7e 415
7bc570c8 416 read_lock(&mrt_lock);
8b90fc7e
BT
417 return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
418 : SEQ_START_TOKEN;
7bc570c8
YH
419}
420
421static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
422{
423 struct ipmr_vif_iter *iter = seq->private;
8b90fc7e 424 struct net *net = seq_file_net(seq);
d1db275d 425 struct mr6_table *mrt = iter->mrt;
7bc570c8
YH
426
427 ++*pos;
428 if (v == SEQ_START_TOKEN)
8b90fc7e 429 return ip6mr_vif_seq_idx(net, iter, 0);
7bc570c8 430
6bd52143
PM
431 while (++iter->ct < mrt->maxvif) {
432 if (!MIF_EXISTS(mrt, iter->ct))
7bc570c8 433 continue;
6bd52143 434 return &mrt->vif6_table[iter->ct];
7bc570c8
YH
435 }
436 return NULL;
437}
438
439static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
440 __releases(mrt_lock)
441{
442 read_unlock(&mrt_lock);
443}
444
445static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
446{
d1db275d
PM
447 struct ipmr_vif_iter *iter = seq->private;
448 struct mr6_table *mrt = iter->mrt;
8b90fc7e 449
7bc570c8
YH
450 if (v == SEQ_START_TOKEN) {
451 seq_puts(seq,
452 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
453 } else {
454 const struct mif_device *vif = v;
455 const char *name = vif->dev ? vif->dev->name : "none";
456
457 seq_printf(seq,
d430a227 458 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
6bd52143 459 vif - mrt->vif6_table,
7bc570c8
YH
460 name, vif->bytes_in, vif->pkt_in,
461 vif->bytes_out, vif->pkt_out,
462 vif->flags);
463 }
464 return 0;
465}
466
98147d52 467static const struct seq_operations ip6mr_vif_seq_ops = {
7bc570c8
YH
468 .start = ip6mr_vif_seq_start,
469 .next = ip6mr_vif_seq_next,
470 .stop = ip6mr_vif_seq_stop,
471 .show = ip6mr_vif_seq_show,
472};
473
474static int ip6mr_vif_open(struct inode *inode, struct file *file)
475{
8b90fc7e
BT
476 return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
477 sizeof(struct ipmr_vif_iter));
7bc570c8
YH
478}
479
5ca1b998 480static const struct file_operations ip6mr_vif_fops = {
7bc570c8
YH
481 .owner = THIS_MODULE,
482 .open = ip6mr_vif_open,
483 .read = seq_read,
484 .llseek = seq_lseek,
8b90fc7e 485 .release = seq_release_net,
7bc570c8
YH
486};
487
488static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
489{
d1db275d 490 struct ipmr_mfc_iter *it = seq->private;
8b90fc7e 491 struct net *net = seq_file_net(seq);
d1db275d 492 struct mr6_table *mrt;
8b90fc7e 493
d1db275d 494 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
63159f29 495 if (!mrt)
d1db275d
PM
496 return ERR_PTR(-ENOENT);
497
498 it->mrt = mrt;
7c71a060 499 it->cache = NULL;
8b90fc7e
BT
500 return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
501 : SEQ_START_TOKEN;
7bc570c8
YH
502}
503
504static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
505{
506 struct mfc6_cache *mfc = v;
507 struct ipmr_mfc_iter *it = seq->private;
8b90fc7e 508 struct net *net = seq_file_net(seq);
d1db275d 509 struct mr6_table *mrt = it->mrt;
7bc570c8
YH
510
511 ++*pos;
512
513 if (v == SEQ_START_TOKEN)
8b90fc7e 514 return ipmr_mfc_seq_idx(net, seq->private, 0);
7bc570c8 515
f30a7784
PM
516 if (mfc->list.next != it->cache)
517 return list_entry(mfc->list.next, struct mfc6_cache, list);
7bc570c8 518
6bd52143 519 if (it->cache == &mrt->mfc6_unres_queue)
7bc570c8
YH
520 goto end_of_list;
521
6bd52143 522 BUG_ON(it->cache != &mrt->mfc6_cache_array[it->ct]);
7bc570c8 523
4a6258a0 524 while (++it->ct < MFC6_LINES) {
6bd52143 525 it->cache = &mrt->mfc6_cache_array[it->ct];
f30a7784
PM
526 if (list_empty(it->cache))
527 continue;
528 return list_first_entry(it->cache, struct mfc6_cache, list);
7bc570c8
YH
529 }
530
531 /* exhausted cache_array, show unresolved */
532 read_unlock(&mrt_lock);
6bd52143 533 it->cache = &mrt->mfc6_unres_queue;
7bc570c8
YH
534 it->ct = 0;
535
536 spin_lock_bh(&mfc_unres_lock);
f30a7784
PM
537 if (!list_empty(it->cache))
538 return list_first_entry(it->cache, struct mfc6_cache, list);
7bc570c8
YH
539
540 end_of_list:
541 spin_unlock_bh(&mfc_unres_lock);
542 it->cache = NULL;
543
544 return NULL;
545}
546
547static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
548{
549 struct ipmr_mfc_iter *it = seq->private;
d1db275d 550 struct mr6_table *mrt = it->mrt;
7bc570c8 551
6bd52143 552 if (it->cache == &mrt->mfc6_unres_queue)
7bc570c8 553 spin_unlock_bh(&mfc_unres_lock);
25b4a44c 554 else if (it->cache == &mrt->mfc6_cache_array[it->ct])
7bc570c8
YH
555 read_unlock(&mrt_lock);
556}
557
558static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
559{
560 int n;
561
562 if (v == SEQ_START_TOKEN) {
563 seq_puts(seq,
564 "Group "
565 "Origin "
566 "Iif Pkts Bytes Wrong Oifs\n");
567 } else {
568 const struct mfc6_cache *mfc = v;
569 const struct ipmr_mfc_iter *it = seq->private;
d1db275d 570 struct mr6_table *mrt = it->mrt;
7bc570c8 571
999890b2 572 seq_printf(seq, "%pI6 %pI6 %-3hd",
0c6ce78a 573 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
1ea472e2 574 mfc->mf6c_parent);
7bc570c8 575
6bd52143 576 if (it->cache != &mrt->mfc6_unres_queue) {
1ea472e2
BT
577 seq_printf(seq, " %8lu %8lu %8lu",
578 mfc->mfc_un.res.pkt,
579 mfc->mfc_un.res.bytes,
580 mfc->mfc_un.res.wrong_if);
7bc570c8
YH
581 for (n = mfc->mfc_un.res.minvif;
582 n < mfc->mfc_un.res.maxvif; n++) {
6bd52143 583 if (MIF_EXISTS(mrt, n) &&
7bc570c8
YH
584 mfc->mfc_un.res.ttls[n] < 255)
585 seq_printf(seq,
586 " %2d:%-3d",
587 n, mfc->mfc_un.res.ttls[n]);
588 }
1ea472e2
BT
589 } else {
590 /* unresolved mfc_caches don't contain
591 * pkt, bytes and wrong_if values
592 */
593 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
7bc570c8
YH
594 }
595 seq_putc(seq, '\n');
596 }
597 return 0;
598}
599
88e9d34c 600static const struct seq_operations ipmr_mfc_seq_ops = {
7bc570c8
YH
601 .start = ipmr_mfc_seq_start,
602 .next = ipmr_mfc_seq_next,
603 .stop = ipmr_mfc_seq_stop,
604 .show = ipmr_mfc_seq_show,
605};
606
607static int ipmr_mfc_open(struct inode *inode, struct file *file)
608{
8b90fc7e
BT
609 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
610 sizeof(struct ipmr_mfc_iter));
7bc570c8
YH
611}
612
5ca1b998 613static const struct file_operations ip6mr_mfc_fops = {
7bc570c8
YH
614 .owner = THIS_MODULE,
615 .open = ipmr_mfc_open,
616 .read = seq_read,
617 .llseek = seq_lseek,
8b90fc7e 618 .release = seq_release_net,
7bc570c8
YH
619};
620#endif
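For reference, the seq_file interfaces defined above are registered by ip6mr_net_init() later in this file and appear per network namespace as /proc/net/ip6_mr_vif and /proc/net/ip6_mr_cache. With the header and format strings used in ip6mr_vif_seq_show(), a table with one ordinary mif would read roughly as follows (counter values are illustrative, not real output):

	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
	 0 eth0          123456     789    654321     987 00000
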
621
14fb64e1 622#ifdef CONFIG_IPV6_PIMSM_V2
14fb64e1
YH
623
624static int pim6_rcv(struct sk_buff *skb)
625{
626 struct pimreghdr *pim;
627 struct ipv6hdr *encap;
628 struct net_device *reg_dev = NULL;
8229efda 629 struct net *net = dev_net(skb->dev);
d1db275d 630 struct mr6_table *mrt;
4c9483b2
DM
631 struct flowi6 fl6 = {
632 .flowi6_iif = skb->dev->ifindex,
633 .flowi6_mark = skb->mark,
d1db275d
PM
634 };
635 int reg_vif_num;
14fb64e1
YH
636
637 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
638 goto drop;
639
640 pim = (struct pimreghdr *)skb_transport_header(skb);
56245cae 641 if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
14fb64e1 642 (pim->flags & PIM_NULL_REGISTER) ||
1d6e55f1
TG
643 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
644 sizeof(*pim), IPPROTO_PIM,
645 csum_partial((void *)pim, sizeof(*pim), 0)) &&
ec6b486f 646 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
14fb64e1
YH
647 goto drop;
648
649 /* check if the inner packet is destined to mcast group */
650 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
651 sizeof(*pim));
652
653 if (!ipv6_addr_is_multicast(&encap->daddr) ||
654 encap->payload_len == 0 ||
655 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
656 goto drop;
657
4c9483b2 658 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
d1db275d
PM
659 goto drop;
660 reg_vif_num = mrt->mroute_reg_vif_num;
661
14fb64e1
YH
662 read_lock(&mrt_lock);
663 if (reg_vif_num >= 0)
6bd52143 664 reg_dev = mrt->vif6_table[reg_vif_num].dev;
14fb64e1
YH
665 if (reg_dev)
666 dev_hold(reg_dev);
667 read_unlock(&mrt_lock);
668
63159f29 669 if (!reg_dev)
14fb64e1
YH
670 goto drop;
671
672 skb->mac_header = skb->network_header;
673 skb_pull(skb, (u8 *)encap - skb->data);
674 skb_reset_network_header(skb);
1d6e55f1 675 skb->protocol = htons(ETH_P_IPV6);
3e49e6d5 676 skb->ip_summed = CHECKSUM_NONE;
d19d56dd 677
ea23192e 678 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
d19d56dd 679
caf586e5 680 netif_rx(skb);
8990f468 681
14fb64e1
YH
682 dev_put(reg_dev);
683 return 0;
684 drop:
685 kfree_skb(skb);
686 return 0;
687}
688
41135cc8 689static const struct inet6_protocol pim6_protocol = {
14fb64e1
YH
690 .handler = pim6_rcv,
691};
692
693/* Service routines creating virtual interfaces: PIMREG */
694
6fef4c0c
SH
695static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
696 struct net_device *dev)
14fb64e1 697{
8229efda 698 struct net *net = dev_net(dev);
d1db275d 699 struct mr6_table *mrt;
4c9483b2
DM
700 struct flowi6 fl6 = {
701 .flowi6_oif = dev->ifindex,
6a662719 702 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
4c9483b2 703 .flowi6_mark = skb->mark,
d1db275d
PM
704 };
705 int err;
706
4c9483b2 707 err = ip6mr_fib_lookup(net, &fl6, &mrt);
67928c40
BG
708 if (err < 0) {
709 kfree_skb(skb);
d1db275d 710 return err;
67928c40 711 }
8229efda 712
14fb64e1 713 read_lock(&mrt_lock);
dc58c78c
PE
714 dev->stats.tx_bytes += skb->len;
715 dev->stats.tx_packets++;
6bd52143 716 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
14fb64e1
YH
717 read_unlock(&mrt_lock);
718 kfree_skb(skb);
6ed10654 719 return NETDEV_TX_OK;
14fb64e1
YH
720}
721
ee9b9596
ND
722static int reg_vif_get_iflink(const struct net_device *dev)
723{
724 return 0;
725}
726
007c3838
SH
727static const struct net_device_ops reg_vif_netdev_ops = {
728 .ndo_start_xmit = reg_vif_xmit,
ee9b9596 729 .ndo_get_iflink = reg_vif_get_iflink,
007c3838
SH
730};
731
14fb64e1
YH
732static void reg_vif_setup(struct net_device *dev)
733{
734 dev->type = ARPHRD_PIMREG;
735 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
736 dev->flags = IFF_NOARP;
007c3838 737 dev->netdev_ops = &reg_vif_netdev_ops;
cf124db5 738 dev->needs_free_netdev = true;
403dbb97 739 dev->features |= NETIF_F_NETNS_LOCAL;
14fb64e1
YH
740}
741
d1db275d 742static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
14fb64e1
YH
743{
744 struct net_device *dev;
d1db275d
PM
745 char name[IFNAMSIZ];
746
747 if (mrt->id == RT6_TABLE_DFLT)
748 sprintf(name, "pim6reg");
749 else
750 sprintf(name, "pim6reg%u", mrt->id);
14fb64e1 751
c835a677 752 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
63159f29 753 if (!dev)
14fb64e1
YH
754 return NULL;
755
8229efda
BT
756 dev_net_set(dev, net);
757
14fb64e1
YH
758 if (register_netdevice(dev)) {
759 free_netdev(dev);
760 return NULL;
761 }
14fb64e1 762
14fb64e1
YH
763 if (dev_open(dev))
764 goto failure;
765
7af3db78 766 dev_hold(dev);
14fb64e1
YH
767 return dev;
768
769failure:
14fb64e1
YH
770 unregister_netdevice(dev);
771 return NULL;
772}
773#endif
774
7bc570c8
YH
775/*
776 * Delete a VIF entry
777 */
778
723b929c
NA
779static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
780 struct list_head *head)
7bc570c8
YH
781{
782 struct mif_device *v;
783 struct net_device *dev;
1d6e55f1 784 struct inet6_dev *in6_dev;
6bd52143
PM
785
786 if (vifi < 0 || vifi >= mrt->maxvif)
7bc570c8
YH
787 return -EADDRNOTAVAIL;
788
6bd52143 789 v = &mrt->vif6_table[vifi];
7bc570c8
YH
790
791 write_lock_bh(&mrt_lock);
792 dev = v->dev;
793 v->dev = NULL;
794
795 if (!dev) {
796 write_unlock_bh(&mrt_lock);
797 return -EADDRNOTAVAIL;
798 }
799
14fb64e1 800#ifdef CONFIG_IPV6_PIMSM_V2
6bd52143
PM
801 if (vifi == mrt->mroute_reg_vif_num)
802 mrt->mroute_reg_vif_num = -1;
14fb64e1
YH
803#endif
804
6bd52143 805 if (vifi + 1 == mrt->maxvif) {
7bc570c8
YH
806 int tmp;
807 for (tmp = vifi - 1; tmp >= 0; tmp--) {
6bd52143 808 if (MIF_EXISTS(mrt, tmp))
7bc570c8
YH
809 break;
810 }
6bd52143 811 mrt->maxvif = tmp + 1;
7bc570c8
YH
812 }
813
814 write_unlock_bh(&mrt_lock);
815
816 dev_set_allmulti(dev, -1);
817
1d6e55f1 818 in6_dev = __in6_dev_get(dev);
d67b8c61 819 if (in6_dev) {
1d6e55f1 820 in6_dev->cnf.mc_forwarding--;
85b3daad 821 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
d67b8c61
ND
822 NETCONFA_MC_FORWARDING,
823 dev->ifindex, &in6_dev->cnf);
824 }
1d6e55f1 825
723b929c 826 if ((v->flags & MIFF_REGISTER) && !notify)
c871e664 827 unregister_netdevice_queue(dev, head);
7bc570c8
YH
828
829 dev_put(dev);
830 return 0;
831}
832
58701ad4
BT
833static inline void ip6mr_cache_free(struct mfc6_cache *c)
834{
58701ad4
BT
835 kmem_cache_free(mrt_cachep, c);
836}
837
7bc570c8
YH
838/* Destroy an unresolved cache entry, killing queued skbs
839 and reporting error to netlink readers.
840 */
841
6bd52143 842static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
7bc570c8 843{
6bd52143 844 struct net *net = read_pnet(&mrt->net);
7bc570c8
YH
845 struct sk_buff *skb;
846
6bd52143 847 atomic_dec(&mrt->cache_resolve_queue_len);
7bc570c8 848
67ba4152 849 while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
7bc570c8 850 if (ipv6_hdr(skb)->version == 0) {
af72868b
JB
851 struct nlmsghdr *nlh = skb_pull(skb,
852 sizeof(struct ipv6hdr));
7bc570c8 853 nlh->nlmsg_type = NLMSG_ERROR;
573ce260 854 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
7bc570c8 855 skb_trim(skb, nlh->nlmsg_len);
573ce260 856 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
15e47304 857 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
7bc570c8
YH
858 } else
859 kfree_skb(skb);
860 }
861
58701ad4 862 ip6mr_cache_free(c);
7bc570c8
YH
863}
864
865
c476efbc 866/* Timer process for all the unresolved queue. */
7bc570c8 867
6bd52143 868static void ipmr_do_expire_process(struct mr6_table *mrt)
7bc570c8
YH
869{
870 unsigned long now = jiffies;
871 unsigned long expires = 10 * HZ;
f30a7784 872 struct mfc6_cache *c, *next;
7bc570c8 873
6bd52143 874 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
7bc570c8
YH
875 if (time_after(c->mfc_un.unres.expires, now)) {
876 /* not yet... */
877 unsigned long interval = c->mfc_un.unres.expires - now;
878 if (interval < expires)
879 expires = interval;
7bc570c8
YH
880 continue;
881 }
882
f30a7784 883 list_del(&c->list);
812e44dd 884 mr6_netlink_event(mrt, c, RTM_DELROUTE);
6bd52143 885 ip6mr_destroy_unres(mrt, c);
7bc570c8
YH
886 }
887
6bd52143
PM
888 if (!list_empty(&mrt->mfc6_unres_queue))
889 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
7bc570c8
YH
890}
891
e99e88a9 892static void ipmr_expire_process(struct timer_list *t)
7bc570c8 893{
e99e88a9 894 struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
c476efbc 895
7bc570c8 896 if (!spin_trylock(&mfc_unres_lock)) {
6bd52143 897 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
7bc570c8
YH
898 return;
899 }
900
6bd52143
PM
901 if (!list_empty(&mrt->mfc6_unres_queue))
902 ipmr_do_expire_process(mrt);
7bc570c8
YH
903
904 spin_unlock(&mfc_unres_lock);
905}
906
907/* Fill oifs list. It is called under write locked mrt_lock. */
908
6bd52143 909static void ip6mr_update_thresholds(struct mr6_table *mrt, struct mfc6_cache *cache,
b5aa30b1 910 unsigned char *ttls)
7bc570c8
YH
911{
912 int vifi;
913
6ac7eb08 914 cache->mfc_un.res.minvif = MAXMIFS;
7bc570c8 915 cache->mfc_un.res.maxvif = 0;
6ac7eb08 916 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
7bc570c8 917
6bd52143
PM
918 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
919 if (MIF_EXISTS(mrt, vifi) &&
4e16880c 920 ttls[vifi] && ttls[vifi] < 255) {
7bc570c8
YH
921 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
922 if (cache->mfc_un.res.minvif > vifi)
923 cache->mfc_un.res.minvif = vifi;
924 if (cache->mfc_un.res.maxvif <= vifi)
925 cache->mfc_un.res.maxvif = vifi + 1;
926 }
927 }
90b5ca17 928 cache->mfc_un.res.lastuse = jiffies;
7bc570c8
YH
929}
930
6bd52143
PM
931static int mif6_add(struct net *net, struct mr6_table *mrt,
932 struct mif6ctl *vifc, int mrtsock)
7bc570c8
YH
933{
934 int vifi = vifc->mif6c_mifi;
6bd52143 935 struct mif_device *v = &mrt->vif6_table[vifi];
7bc570c8 936 struct net_device *dev;
1d6e55f1 937 struct inet6_dev *in6_dev;
5ae7b444 938 int err;
7bc570c8
YH
939
940 /* Is vif busy ? */
6bd52143 941 if (MIF_EXISTS(mrt, vifi))
7bc570c8
YH
942 return -EADDRINUSE;
943
944 switch (vifc->mif6c_flags) {
14fb64e1
YH
945#ifdef CONFIG_IPV6_PIMSM_V2
946 case MIFF_REGISTER:
947 /*
948 * Special Purpose VIF in PIM
949 * All the packets will be sent to the daemon
950 */
6bd52143 951 if (mrt->mroute_reg_vif_num >= 0)
14fb64e1 952 return -EADDRINUSE;
d1db275d 953 dev = ip6mr_reg_vif(net, mrt);
14fb64e1
YH
954 if (!dev)
955 return -ENOBUFS;
5ae7b444
WC
956 err = dev_set_allmulti(dev, 1);
957 if (err) {
958 unregister_netdevice(dev);
7af3db78 959 dev_put(dev);
5ae7b444
WC
960 return err;
961 }
14fb64e1
YH
962 break;
963#endif
7bc570c8 964 case 0:
8229efda 965 dev = dev_get_by_index(net, vifc->mif6c_pifi);
7bc570c8
YH
966 if (!dev)
967 return -EADDRNOTAVAIL;
5ae7b444 968 err = dev_set_allmulti(dev, 1);
7af3db78
WC
969 if (err) {
970 dev_put(dev);
5ae7b444 971 return err;
7af3db78 972 }
7bc570c8
YH
973 break;
974 default:
975 return -EINVAL;
976 }
977
1d6e55f1 978 in6_dev = __in6_dev_get(dev);
d67b8c61 979 if (in6_dev) {
1d6e55f1 980 in6_dev->cnf.mc_forwarding++;
85b3daad 981 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
d67b8c61
ND
982 NETCONFA_MC_FORWARDING,
983 dev->ifindex, &in6_dev->cnf);
984 }
1d6e55f1 985
7bc570c8
YH
986 /*
987 * Fill in the VIF structures
988 */
989 v->rate_limit = vifc->vifc_rate_limit;
990 v->flags = vifc->mif6c_flags;
991 if (!mrtsock)
992 v->flags |= VIFF_STATIC;
993 v->threshold = vifc->vifc_threshold;
994 v->bytes_in = 0;
995 v->bytes_out = 0;
996 v->pkt_in = 0;
997 v->pkt_out = 0;
998 v->link = dev->ifindex;
999 if (v->flags & MIFF_REGISTER)
a54acb3a 1000 v->link = dev_get_iflink(dev);
7bc570c8
YH
1001
1002 /* And finish update writing critical data */
1003 write_lock_bh(&mrt_lock);
7bc570c8 1004 v->dev = dev;
14fb64e1
YH
1005#ifdef CONFIG_IPV6_PIMSM_V2
1006 if (v->flags & MIFF_REGISTER)
6bd52143 1007 mrt->mroute_reg_vif_num = vifi;
14fb64e1 1008#endif
6bd52143
PM
1009 if (vifi + 1 > mrt->maxvif)
1010 mrt->maxvif = vifi + 1;
7bc570c8
YH
1011 write_unlock_bh(&mrt_lock);
1012 return 0;
1013}
1014
6bd52143 1015static struct mfc6_cache *ip6mr_cache_find(struct mr6_table *mrt,
b71d1d42
ED
1016 const struct in6_addr *origin,
1017 const struct in6_addr *mcastgrp)
7bc570c8
YH
1018{
1019 int line = MFC6_HASH(mcastgrp, origin);
1020 struct mfc6_cache *c;
1021
6bd52143 1022 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
7bc570c8
YH
1023 if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
1024 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
f30a7784 1025 return c;
7bc570c8 1026 }
f30a7784 1027 return NULL;
7bc570c8
YH
1028}
1029
660b26dc
ND
1030/* Look for a (*,*,oif) entry */
1031static struct mfc6_cache *ip6mr_cache_find_any_parent(struct mr6_table *mrt,
1032 mifi_t mifi)
1033{
1034 int line = MFC6_HASH(&in6addr_any, &in6addr_any);
1035 struct mfc6_cache *c;
1036
1037 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1038 if (ipv6_addr_any(&c->mf6c_origin) &&
1039 ipv6_addr_any(&c->mf6c_mcastgrp) &&
1040 (c->mfc_un.res.ttls[mifi] < 255))
1041 return c;
1042
1043 return NULL;
1044}
1045
1046/* Look for a (*,G) entry */
1047static struct mfc6_cache *ip6mr_cache_find_any(struct mr6_table *mrt,
1048 struct in6_addr *mcastgrp,
1049 mifi_t mifi)
1050{
1051 int line = MFC6_HASH(mcastgrp, &in6addr_any);
1052 struct mfc6_cache *c, *proxy;
1053
1054 if (ipv6_addr_any(mcastgrp))
1055 goto skip;
1056
1057 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list)
1058 if (ipv6_addr_any(&c->mf6c_origin) &&
1059 ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp)) {
1060 if (c->mfc_un.res.ttls[mifi] < 255)
1061 return c;
1062
1063 /* It's ok if the mifi is part of the static tree */
1064 proxy = ip6mr_cache_find_any_parent(mrt,
1065 c->mf6c_parent);
1066 if (proxy && proxy->mfc_un.res.ttls[mifi] < 255)
1067 return c;
1068 }
1069
1070skip:
1071 return ip6mr_cache_find_any_parent(mrt, mifi);
1072}
1073
7bc570c8
YH
1074/*
1075 * Allocate a multicast cache entry
1076 */
b5aa30b1 1077static struct mfc6_cache *ip6mr_cache_alloc(void)
7bc570c8 1078{
36cbac59 1079 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
63159f29 1080 if (!c)
7bc570c8 1081 return NULL;
70a0dec4 1082 c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
6ac7eb08 1083 c->mfc_un.res.minvif = MAXMIFS;
7bc570c8
YH
1084 return c;
1085}
1086
b5aa30b1 1087static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
7bc570c8 1088{
36cbac59 1089 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
63159f29 1090 if (!c)
7bc570c8 1091 return NULL;
7bc570c8
YH
1092 skb_queue_head_init(&c->mfc_un.unres.unresolved);
1093 c->mfc_un.unres.expires = jiffies + 10 * HZ;
1094 return c;
1095}
1096
1097/*
1098 * A cache entry has gone into a resolved state from queued
1099 */
1100
6bd52143
PM
1101static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1102 struct mfc6_cache *uc, struct mfc6_cache *c)
7bc570c8
YH
1103{
1104 struct sk_buff *skb;
1105
1106 /*
1107 * Play the pending entries through our router
1108 */
1109
67ba4152 1110 while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
7bc570c8 1111 if (ipv6_hdr(skb)->version == 0) {
af72868b
JB
1112 struct nlmsghdr *nlh = skb_pull(skb,
1113 sizeof(struct ipv6hdr));
7bc570c8 1114
573ce260 1115 if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
549e028d 1116 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
7bc570c8
YH
1117 } else {
1118 nlh->nlmsg_type = NLMSG_ERROR;
573ce260 1119 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
7bc570c8 1120 skb_trim(skb, nlh->nlmsg_len);
573ce260 1121 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
7bc570c8 1122 }
15e47304 1123 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
7bc570c8 1124 } else
6bd52143 1125 ip6_mr_forward(net, mrt, skb, c);
7bc570c8
YH
1126 }
1127}
1128
1129/*
dd12d15c 1130 * Bounce a cache query up to pim6sd and netlink.
7bc570c8
YH
1131 *
1132 * Called under mrt_lock.
1133 */
1134
6bd52143
PM
1135static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
1136 mifi_t mifi, int assert)
7bc570c8
YH
1137{
1138 struct sk_buff *skb;
1139 struct mrt6msg *msg;
1140 int ret;
1141
14fb64e1
YH
1142#ifdef CONFIG_IPV6_PIMSM_V2
1143 if (assert == MRT6MSG_WHOLEPKT)
1144 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1145 +sizeof(*msg));
1146 else
1147#endif
1148 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
7bc570c8
YH
1149
1150 if (!skb)
1151 return -ENOBUFS;
1152
1153 /* I suppose that internal messages
1154 * do not require checksums */
1155
1156 skb->ip_summed = CHECKSUM_UNNECESSARY;
1157
14fb64e1
YH
1158#ifdef CONFIG_IPV6_PIMSM_V2
1159 if (assert == MRT6MSG_WHOLEPKT) {
1160 /* Ugly, but we have no choice with this interface.
1161 Duplicate old header, fix length etc.
1162 And all this only to mangle msg->im6_msgtype and
1163 to set msg->im6_mbz to "mbz" :-)
1164 */
1165 skb_push(skb, -skb_network_offset(pkt));
1166
1167 skb_push(skb, sizeof(*msg));
1168 skb_reset_transport_header(skb);
1169 msg = (struct mrt6msg *)skb_transport_header(skb);
1170 msg->im6_mbz = 0;
1171 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
6bd52143 1172 msg->im6_mif = mrt->mroute_reg_vif_num;
14fb64e1 1173 msg->im6_pad = 0;
4e3fd7a0
AD
1174 msg->im6_src = ipv6_hdr(pkt)->saddr;
1175 msg->im6_dst = ipv6_hdr(pkt)->daddr;
14fb64e1
YH
1176
1177 skb->ip_summed = CHECKSUM_UNNECESSARY;
1178 } else
1179#endif
1180 {
7bc570c8
YH
1181 /*
1182 * Copy the IP header
1183 */
1184
1185 skb_put(skb, sizeof(struct ipv6hdr));
1186 skb_reset_network_header(skb);
1187 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1188
1189 /*
1190 * Add our header
1191 */
1192 skb_put(skb, sizeof(*msg));
1193 skb_reset_transport_header(skb);
1194 msg = (struct mrt6msg *)skb_transport_header(skb);
1195
1196 msg->im6_mbz = 0;
1197 msg->im6_msgtype = assert;
6ac7eb08 1198 msg->im6_mif = mifi;
7bc570c8 1199 msg->im6_pad = 0;
4e3fd7a0
AD
1200 msg->im6_src = ipv6_hdr(pkt)->saddr;
1201 msg->im6_dst = ipv6_hdr(pkt)->daddr;
7bc570c8 1202
adf30907 1203 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
7bc570c8 1204 skb->ip_summed = CHECKSUM_UNNECESSARY;
14fb64e1 1205 }
7bc570c8 1206
63159f29 1207 if (!mrt->mroute6_sk) {
7bc570c8
YH
1208 kfree_skb(skb);
1209 return -EINVAL;
1210 }
1211
dd12d15c
JG
1212 mrt6msg_netlink_event(mrt, skb);
1213
7bc570c8
YH
1214 /*
1215 * Deliver to user space multicast routing algorithms
1216 */
6bd52143 1217 ret = sock_queue_rcv_skb(mrt->mroute6_sk, skb);
bd91b8bf 1218 if (ret < 0) {
e87cc472 1219 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
7bc570c8
YH
1220 kfree_skb(skb);
1221 }
1222
1223 return ret;
1224}
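On the receiving end of this upcall, the user-space daemon (pim6sd or similar) is whoever bound mroute6_sk with MRT6_INIT; each report queued by sock_queue_rcv_skb() above starts with the struct mrt6msg filled in here, and im6_mbz == 0 is what distinguishes it from ordinary ICMPv6 traffic on the same raw socket. A hedged, self-contained userspace sketch of that receive loop, assuming only the UAPI definitions from <linux/mroute6.h> (illustrative code, not taken from pim6sd; needs CAP_NET_RAW/CAP_NET_ADMIN):

#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/mroute6.h>

int main(void)
{
	int one = 1;
	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	union {
		struct mrt6msg msg;
		char buf[8192];
	} u;

	/* Become the mroute6 socket; the kernel then starts sending upcalls. */
	if (s < 0 || setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one)) < 0)
		return 1;

	for (;;) {
		ssize_t n = read(s, u.buf, sizeof(u.buf));

		if (n < (ssize_t)sizeof(u.msg) || u.msg.im6_mbz != 0)
			continue;	/* ordinary ICMPv6 packet, not an upcall */

		if (u.msg.im6_msgtype == MRT6MSG_NOCACHE) {
			/* Unknown (S,G) seen on mif u.msg.im6_mif: decide on a
			 * route and install it with MRT6_ADD_MFC. */
		} else if (u.msg.im6_msgtype == MRT6MSG_WHOLEPKT) {
			/* Full packet from the register vif: PIM register
			 * processing would go here. */
		}
	}
}
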
1225
1226/*
1227 * Queue a packet for resolution. It gets locked cache entry!
1228 */
1229
1230static int
6bd52143 1231ip6mr_cache_unresolved(struct mr6_table *mrt, mifi_t mifi, struct sk_buff *skb)
7bc570c8 1232{
f30a7784 1233 bool found = false;
7bc570c8
YH
1234 int err;
1235 struct mfc6_cache *c;
1236
1237 spin_lock_bh(&mfc_unres_lock);
6bd52143 1238 list_for_each_entry(c, &mrt->mfc6_unres_queue, list) {
c476efbc 1239 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
f30a7784
PM
1240 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1241 found = true;
7bc570c8 1242 break;
f30a7784 1243 }
7bc570c8
YH
1244 }
1245
f30a7784 1246 if (!found) {
7bc570c8
YH
1247 /*
1248 * Create a new entry if allowable
1249 */
1250
6bd52143 1251 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
b5aa30b1 1252 (c = ip6mr_cache_alloc_unres()) == NULL) {
7bc570c8
YH
1253 spin_unlock_bh(&mfc_unres_lock);
1254
1255 kfree_skb(skb);
1256 return -ENOBUFS;
1257 }
1258
1259 /*
1260 * Fill in the new cache entry
1261 */
1262 c->mf6c_parent = -1;
1263 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1264 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1265
1266 /*
1267 * Reflect first query at pim6sd
1268 */
6bd52143 1269 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
8229efda 1270 if (err < 0) {
7bc570c8
YH
1271 /* If the report failed throw the cache entry
1272 out - Brad Parker
1273 */
1274 spin_unlock_bh(&mfc_unres_lock);
1275
58701ad4 1276 ip6mr_cache_free(c);
7bc570c8
YH
1277 kfree_skb(skb);
1278 return err;
1279 }
1280
6bd52143
PM
1281 atomic_inc(&mrt->cache_resolve_queue_len);
1282 list_add(&c->list, &mrt->mfc6_unres_queue);
812e44dd 1283 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
7bc570c8 1284
6bd52143 1285 ipmr_do_expire_process(mrt);
7bc570c8
YH
1286 }
1287
1288 /*
1289 * See if we can append the packet
1290 */
1291 if (c->mfc_un.unres.unresolved.qlen > 3) {
1292 kfree_skb(skb);
1293 err = -ENOBUFS;
1294 } else {
1295 skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
1296 err = 0;
1297 }
1298
1299 spin_unlock_bh(&mfc_unres_lock);
1300 return err;
1301}
1302
1303/*
1304 * MFC6 cache manipulation by user space
1305 */
1306
660b26dc
ND
1307static int ip6mr_mfc_delete(struct mr6_table *mrt, struct mf6cctl *mfc,
1308 int parent)
7bc570c8
YH
1309{
1310 int line;
f30a7784 1311 struct mfc6_cache *c, *next;
7bc570c8
YH
1312
1313 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1314
6bd52143 1315 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[line], list) {
7bc570c8 1316 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
660b26dc
ND
1317 ipv6_addr_equal(&c->mf6c_mcastgrp,
1318 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1319 (parent == -1 || parent == c->mf6c_parent)) {
7bc570c8 1320 write_lock_bh(&mrt_lock);
f30a7784 1321 list_del(&c->list);
7bc570c8
YH
1322 write_unlock_bh(&mrt_lock);
1323
812e44dd 1324 mr6_netlink_event(mrt, c, RTM_DELROUTE);
58701ad4 1325 ip6mr_cache_free(c);
7bc570c8
YH
1326 return 0;
1327 }
1328 }
1329 return -ENOENT;
1330}
1331
1332static int ip6mr_device_event(struct notifier_block *this,
1333 unsigned long event, void *ptr)
1334{
351638e7 1335 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
8229efda 1336 struct net *net = dev_net(dev);
d1db275d 1337 struct mr6_table *mrt;
7bc570c8
YH
1338 struct mif_device *v;
1339 int ct;
1340
7bc570c8
YH
1341 if (event != NETDEV_UNREGISTER)
1342 return NOTIFY_DONE;
1343
d1db275d
PM
1344 ip6mr_for_each_table(mrt, net) {
1345 v = &mrt->vif6_table[0];
1346 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1347 if (v->dev == dev)
723b929c 1348 mif6_delete(mrt, ct, 1, NULL);
d1db275d 1349 }
7bc570c8 1350 }
c871e664 1351
7bc570c8
YH
1352 return NOTIFY_DONE;
1353}
1354
1355static struct notifier_block ip6_mr_notifier = {
1356 .notifier_call = ip6mr_device_event
1357};
1358
1359/*
1360 * Setup for IP multicast routing
1361 */
1362
4e16880c
BT
1363static int __net_init ip6mr_net_init(struct net *net)
1364{
d1db275d 1365 int err;
f30a7784 1366
d1db275d
PM
1367 err = ip6mr_rules_init(net);
1368 if (err < 0)
4e16880c 1369 goto fail;
8b90fc7e
BT
1370
1371#ifdef CONFIG_PROC_FS
1372 err = -ENOMEM;
d4beaa66 1373 if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
8b90fc7e 1374 goto proc_vif_fail;
d4beaa66 1375 if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
8b90fc7e
BT
1376 goto proc_cache_fail;
1377#endif
6bd52143 1378
4a6258a0
BT
1379 return 0;
1380
8b90fc7e
BT
1381#ifdef CONFIG_PROC_FS
1382proc_cache_fail:
ece31ffd 1383 remove_proc_entry("ip6_mr_vif", net->proc_net);
8b90fc7e 1384proc_vif_fail:
d1db275d 1385 ip6mr_rules_exit(net);
8b90fc7e 1386#endif
4e16880c
BT
1387fail:
1388 return err;
1389}
1390
1391static void __net_exit ip6mr_net_exit(struct net *net)
1392{
8b90fc7e 1393#ifdef CONFIG_PROC_FS
ece31ffd
G
1394 remove_proc_entry("ip6_mr_cache", net->proc_net);
1395 remove_proc_entry("ip6_mr_vif", net->proc_net);
8b90fc7e 1396#endif
d1db275d 1397 ip6mr_rules_exit(net);
4e16880c
BT
1398}
1399
1400static struct pernet_operations ip6mr_net_ops = {
1401 .init = ip6mr_net_init,
1402 .exit = ip6mr_net_exit,
1403};
1404
623d1a1a 1405int __init ip6_mr_init(void)
7bc570c8 1406{
623d1a1a
WC
1407 int err;
1408
7bc570c8
YH
1409 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1410 sizeof(struct mfc6_cache),
1411 0, SLAB_HWCACHE_ALIGN,
1412 NULL);
1413 if (!mrt_cachep)
623d1a1a 1414 return -ENOMEM;
7bc570c8 1415
4e16880c
BT
1416 err = register_pernet_subsys(&ip6mr_net_ops);
1417 if (err)
1418 goto reg_pernet_fail;
1419
623d1a1a
WC
1420 err = register_netdevice_notifier(&ip6_mr_notifier);
1421 if (err)
1422 goto reg_notif_fail;
403dbb97
TG
1423#ifdef CONFIG_IPV6_PIMSM_V2
1424 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
f3213831 1425 pr_err("%s: can't add PIM protocol\n", __func__);
403dbb97
TG
1426 err = -EAGAIN;
1427 goto add_proto_fail;
1428 }
1429#endif
c7ac8679 1430 rtnl_register(RTNL_FAMILY_IP6MR, RTM_GETROUTE, NULL,
b97bac64 1431 ip6mr_rtm_dumproute, 0);
623d1a1a 1432 return 0;
403dbb97
TG
1433#ifdef CONFIG_IPV6_PIMSM_V2
1434add_proto_fail:
1435 unregister_netdevice_notifier(&ip6_mr_notifier);
1436#endif
87b30a65 1437reg_notif_fail:
4e16880c
BT
1438 unregister_pernet_subsys(&ip6mr_net_ops);
1439reg_pernet_fail:
87b30a65 1440 kmem_cache_destroy(mrt_cachep);
623d1a1a 1441 return err;
7bc570c8
YH
1442}
1443
623d1a1a
WC
1444void ip6_mr_cleanup(void)
1445{
ffb1388a
DJ
1446 rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1447#ifdef CONFIG_IPV6_PIMSM_V2
1448 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1449#endif
623d1a1a 1450 unregister_netdevice_notifier(&ip6_mr_notifier);
4e16880c 1451 unregister_pernet_subsys(&ip6mr_net_ops);
623d1a1a
WC
1452 kmem_cache_destroy(mrt_cachep);
1453}
7bc570c8 1454
6bd52143 1455static int ip6mr_mfc_add(struct net *net, struct mr6_table *mrt,
660b26dc 1456 struct mf6cctl *mfc, int mrtsock, int parent)
7bc570c8 1457{
f30a7784 1458 bool found = false;
7bc570c8 1459 int line;
f30a7784 1460 struct mfc6_cache *uc, *c;
6ac7eb08 1461 unsigned char ttls[MAXMIFS];
7bc570c8
YH
1462 int i;
1463
a50436f2
PM
1464 if (mfc->mf6cc_parent >= MAXMIFS)
1465 return -ENFILE;
1466
6ac7eb08
RR
1467 memset(ttls, 255, MAXMIFS);
1468 for (i = 0; i < MAXMIFS; i++) {
7bc570c8
YH
1469 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1470 ttls[i] = 1;
1471
1472 }
1473
1474 line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);
1475
6bd52143 1476 list_for_each_entry(c, &mrt->mfc6_cache_array[line], list) {
7bc570c8 1477 if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
660b26dc
ND
1478 ipv6_addr_equal(&c->mf6c_mcastgrp,
1479 &mfc->mf6cc_mcastgrp.sin6_addr) &&
1480 (parent == -1 || parent == mfc->mf6cc_parent)) {
f30a7784 1481 found = true;
7bc570c8 1482 break;
f30a7784 1483 }
7bc570c8
YH
1484 }
1485
f30a7784 1486 if (found) {
7bc570c8
YH
1487 write_lock_bh(&mrt_lock);
1488 c->mf6c_parent = mfc->mf6cc_parent;
6bd52143 1489 ip6mr_update_thresholds(mrt, c, ttls);
7bc570c8
YH
1490 if (!mrtsock)
1491 c->mfc_flags |= MFC_STATIC;
1492 write_unlock_bh(&mrt_lock);
812e44dd 1493 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
7bc570c8
YH
1494 return 0;
1495 }
1496
660b26dc
ND
1497 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1498 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
7bc570c8
YH
1499 return -EINVAL;
1500
b5aa30b1 1501 c = ip6mr_cache_alloc();
63159f29 1502 if (!c)
7bc570c8
YH
1503 return -ENOMEM;
1504
1505 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1506 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1507 c->mf6c_parent = mfc->mf6cc_parent;
6bd52143 1508 ip6mr_update_thresholds(mrt, c, ttls);
7bc570c8
YH
1509 if (!mrtsock)
1510 c->mfc_flags |= MFC_STATIC;
1511
1512 write_lock_bh(&mrt_lock);
6bd52143 1513 list_add(&c->list, &mrt->mfc6_cache_array[line]);
7bc570c8
YH
1514 write_unlock_bh(&mrt_lock);
1515
1516 /*
1517 * Check to see if we resolved a queued list. If so we
1518 * need to send on the frames and tidy up.
1519 */
f30a7784 1520 found = false;
7bc570c8 1521 spin_lock_bh(&mfc_unres_lock);
6bd52143 1522 list_for_each_entry(uc, &mrt->mfc6_unres_queue, list) {
c476efbc 1523 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
7bc570c8 1524 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
f30a7784 1525 list_del(&uc->list);
6bd52143 1526 atomic_dec(&mrt->cache_resolve_queue_len);
f30a7784 1527 found = true;
7bc570c8
YH
1528 break;
1529 }
1530 }
6bd52143
PM
1531 if (list_empty(&mrt->mfc6_unres_queue))
1532 del_timer(&mrt->ipmr_expire_timer);
7bc570c8
YH
1533 spin_unlock_bh(&mfc_unres_lock);
1534
f30a7784 1535 if (found) {
6bd52143 1536 ip6mr_cache_resolve(net, mrt, uc, c);
58701ad4 1537 ip6mr_cache_free(uc);
7bc570c8 1538 }
812e44dd 1539 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
7bc570c8
YH
1540 return 0;
1541}
1542
1543/*
1544 * Close the multicast socket, and clear the vif tables etc
1545 */
1546
4c698046 1547static void mroute_clean_tables(struct mr6_table *mrt, bool all)
7bc570c8
YH
1548{
1549 int i;
c871e664 1550 LIST_HEAD(list);
f30a7784 1551 struct mfc6_cache *c, *next;
7bc570c8
YH
1552
1553 /*
1554 * Shut down all active vif entries
1555 */
6bd52143 1556 for (i = 0; i < mrt->maxvif; i++) {
4c698046
NA
1557 if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
1558 continue;
723b929c 1559 mif6_delete(mrt, i, 0, &list);
7bc570c8 1560 }
c871e664 1561 unregister_netdevice_many(&list);
7bc570c8
YH
1562
1563 /*
1564 * Wipe the cache
1565 */
4a6258a0 1566 for (i = 0; i < MFC6_LINES; i++) {
6bd52143 1567 list_for_each_entry_safe(c, next, &mrt->mfc6_cache_array[i], list) {
4c698046 1568 if (!all && (c->mfc_flags & MFC_STATIC))
7bc570c8 1569 continue;
7bc570c8 1570 write_lock_bh(&mrt_lock);
f30a7784 1571 list_del(&c->list);
7bc570c8
YH
1572 write_unlock_bh(&mrt_lock);
1573
812e44dd 1574 mr6_netlink_event(mrt, c, RTM_DELROUTE);
58701ad4 1575 ip6mr_cache_free(c);
7bc570c8
YH
1576 }
1577 }
1578
6bd52143 1579 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
7bc570c8 1580 spin_lock_bh(&mfc_unres_lock);
6bd52143 1581 list_for_each_entry_safe(c, next, &mrt->mfc6_unres_queue, list) {
f30a7784 1582 list_del(&c->list);
812e44dd 1583 mr6_netlink_event(mrt, c, RTM_DELROUTE);
6bd52143 1584 ip6mr_destroy_unres(mrt, c);
7bc570c8
YH
1585 }
1586 spin_unlock_bh(&mfc_unres_lock);
1587 }
1588}
1589
6bd52143 1590static int ip6mr_sk_init(struct mr6_table *mrt, struct sock *sk)
7bc570c8
YH
1591{
1592 int err = 0;
8229efda 1593 struct net *net = sock_net(sk);
7bc570c8
YH
1594
1595 rtnl_lock();
1596 write_lock_bh(&mrt_lock);
6bd52143
PM
1597 if (likely(mrt->mroute6_sk == NULL)) {
1598 mrt->mroute6_sk = sk;
1d6e55f1 1599 net->ipv6.devconf_all->mc_forwarding++;
927265bc 1600 } else {
7bc570c8 1601 err = -EADDRINUSE;
927265bc 1602 }
7bc570c8
YH
1603 write_unlock_bh(&mrt_lock);
1604
927265bc 1605 if (!err)
85b3daad
DA
1606 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1607 NETCONFA_MC_FORWARDING,
927265bc
ED
1608 NETCONFA_IFINDEX_ALL,
1609 net->ipv6.devconf_all);
7bc570c8
YH
1610 rtnl_unlock();
1611
1612 return err;
1613}
1614
1615int ip6mr_sk_done(struct sock *sk)
1616{
d1db275d 1617 int err = -EACCES;
8229efda 1618 struct net *net = sock_net(sk);
d1db275d 1619 struct mr6_table *mrt;
7bc570c8 1620
338d182f
FR
1621 if (sk->sk_type != SOCK_RAW ||
1622 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1623 return err;
1624
7bc570c8 1625 rtnl_lock();
d1db275d
PM
1626 ip6mr_for_each_table(mrt, net) {
1627 if (sk == mrt->mroute6_sk) {
1628 write_lock_bh(&mrt_lock);
1629 mrt->mroute6_sk = NULL;
1630 net->ipv6.devconf_all->mc_forwarding--;
927265bc 1631 write_unlock_bh(&mrt_lock);
85b3daad 1632 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
d67b8c61
ND
1633 NETCONFA_MC_FORWARDING,
1634 NETCONFA_IFINDEX_ALL,
1635 net->ipv6.devconf_all);
7bc570c8 1636
4c698046 1637 mroute_clean_tables(mrt, false);
d1db275d
PM
1638 err = 0;
1639 break;
1640 }
1641 }
7bc570c8
YH
1642 rtnl_unlock();
1643
1644 return err;
1645}
1646
d1db275d 1647struct sock *mroute6_socket(struct net *net, struct sk_buff *skb)
6bd52143 1648{
d1db275d 1649 struct mr6_table *mrt;
4c9483b2 1650 struct flowi6 fl6 = {
e374c618 1651 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
4c9483b2
DM
1652 .flowi6_oif = skb->dev->ifindex,
1653 .flowi6_mark = skb->mark,
d1db275d
PM
1654 };
1655
4c9483b2 1656 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
d1db275d 1657 return NULL;
6bd52143
PM
1658
1659 return mrt->mroute6_sk;
1660}
1661
7bc570c8
YH
1662/*
1663 * Socket options and virtual interface manipulation. The whole
1664 * virtual interface system is a complete heap, but unfortunately
1665 * that's how BSD mrouted happens to think. Maybe one day with a proper
1666 * MOSPF/PIM router set up we can clean this up.
1667 */
1668
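As a concrete illustration of the API implemented below, here is a hedged userspace fragment that brings up multicast routing on the default table, registers one interface as mif 0, and installs a single (S,G) entry. Field names come from the mif6ctl/mf6cctl structures in <linux/mroute6.h>; the interface name and addresses are placeholders, and the caller needs CAP_NET_RAW/CAP_NET_ADMIN:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/mroute6.h>

static int setup_ip6mr(void)
{
	int one = 1;
	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	struct mif6ctl mif;
	struct mf6cctl mfc;

	if (s < 0 || setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one)) < 0)
		return -1;

	/* mif 0 <- physical interface "eth0" (placeholder name) */
	memset(&mif, 0, sizeof(mif));
	mif.mif6c_mifi = 0;
	mif.mif6c_flags = 0;			/* not MIFF_REGISTER */
	mif.mif6c_pifi = if_nametoindex("eth0");
	if (setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif)) < 0)
		return -1;

	/* (S,G) arriving on mif 0, forwarded out of mif 1
	 * (mif 1 would be added the same way as mif 0, not shown) */
	memset(&mfc, 0, sizeof(mfc));
	mfc.mf6cc_origin.sin6_family = AF_INET6;
	mfc.mf6cc_mcastgrp.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &mfc.mf6cc_origin.sin6_addr);
	inet_pton(AF_INET6, "ff3e::1234", &mfc.mf6cc_mcastgrp.sin6_addr);
	mfc.mf6cc_parent = 0;
	IF_SET(1, &mfc.mf6cc_ifset);
	return setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
}
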
b7058842 1669int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
7bc570c8 1670{
660b26dc 1671 int ret, parent = 0;
7bc570c8
YH
1672 struct mif6ctl vif;
1673 struct mf6cctl mfc;
1674 mifi_t mifi;
8229efda 1675 struct net *net = sock_net(sk);
d1db275d
PM
1676 struct mr6_table *mrt;
1677
99253eb7
XL
1678 if (sk->sk_type != SOCK_RAW ||
1679 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1680 return -EOPNOTSUPP;
1681
d1db275d 1682 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
63159f29 1683 if (!mrt)
d1db275d 1684 return -ENOENT;
7bc570c8
YH
1685
1686 if (optname != MRT6_INIT) {
af31f412 1687 if (sk != mrt->mroute6_sk && !ns_capable(net->user_ns, CAP_NET_ADMIN))
7bc570c8
YH
1688 return -EACCES;
1689 }
1690
1691 switch (optname) {
1692 case MRT6_INIT:
7bc570c8
YH
1693 if (optlen < sizeof(int))
1694 return -EINVAL;
1695
6bd52143 1696 return ip6mr_sk_init(mrt, sk);
7bc570c8
YH
1697
1698 case MRT6_DONE:
1699 return ip6mr_sk_done(sk);
1700
1701 case MRT6_ADD_MIF:
1702 if (optlen < sizeof(vif))
1703 return -EINVAL;
1704 if (copy_from_user(&vif, optval, sizeof(vif)))
1705 return -EFAULT;
6ac7eb08 1706 if (vif.mif6c_mifi >= MAXMIFS)
7bc570c8
YH
1707 return -ENFILE;
1708 rtnl_lock();
6bd52143 1709 ret = mif6_add(net, mrt, &vif, sk == mrt->mroute6_sk);
7bc570c8
YH
1710 rtnl_unlock();
1711 return ret;
1712
1713 case MRT6_DEL_MIF:
1714 if (optlen < sizeof(mifi_t))
1715 return -EINVAL;
1716 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1717 return -EFAULT;
1718 rtnl_lock();
723b929c 1719 ret = mif6_delete(mrt, mifi, 0, NULL);
7bc570c8
YH
1720 rtnl_unlock();
1721 return ret;
1722
1723 /*
1724 * Manipulate the forwarding caches. These live
1725 * in a sort of kernel/user symbiosis.
1726 */
1727 case MRT6_ADD_MFC:
1728 case MRT6_DEL_MFC:
660b26dc 1729 parent = -1;
275757e6 1730 /* fall through */
660b26dc
ND
1731 case MRT6_ADD_MFC_PROXY:
1732 case MRT6_DEL_MFC_PROXY:
7bc570c8
YH
1733 if (optlen < sizeof(mfc))
1734 return -EINVAL;
1735 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1736 return -EFAULT;
660b26dc
ND
1737 if (parent == 0)
1738 parent = mfc.mf6cc_parent;
7bc570c8 1739 rtnl_lock();
660b26dc
ND
1740 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1741 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
7bc570c8 1742 else
660b26dc
ND
1743 ret = ip6mr_mfc_add(net, mrt, &mfc,
1744 sk == mrt->mroute6_sk, parent);
7bc570c8
YH
1745 rtnl_unlock();
1746 return ret;
1747
14fb64e1
YH
1748 /*
1749 * Control PIM assert (to activate pim will activate assert)
1750 */
1751 case MRT6_ASSERT:
1752 {
1753 int v;
03f52a0a
JP
1754
1755 if (optlen != sizeof(v))
1756 return -EINVAL;
14fb64e1
YH
1757 if (get_user(v, (int __user *)optval))
1758 return -EFAULT;
53d6841d 1759 mrt->mroute_do_assert = v;
14fb64e1
YH
1760 return 0;
1761 }
1762
1763#ifdef CONFIG_IPV6_PIMSM_V2
1764 case MRT6_PIM:
1765 {
a9f83bf3 1766 int v;
03f52a0a
JP
1767
1768 if (optlen != sizeof(v))
1769 return -EINVAL;
14fb64e1
YH
1770 if (get_user(v, (int __user *)optval))
1771 return -EFAULT;
1772 v = !!v;
1773 rtnl_lock();
1774 ret = 0;
6bd52143
PM
1775 if (v != mrt->mroute_do_pim) {
1776 mrt->mroute_do_pim = v;
1777 mrt->mroute_do_assert = v;
14fb64e1
YH
1778 }
1779 rtnl_unlock();
1780 return ret;
1781 }
1782
d1db275d
PM
1783#endif
1784#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1785 case MRT6_TABLE:
1786 {
1787 u32 v;
1788
1789 if (optlen != sizeof(u32))
1790 return -EINVAL;
1791 if (get_user(v, (u32 __user *)optval))
1792 return -EFAULT;
75356a81
DC
1793 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1794 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1795 return -EINVAL;
d1db275d
PM
1796 if (sk == mrt->mroute6_sk)
1797 return -EBUSY;
1798
1799 rtnl_lock();
1800 ret = 0;
1801 if (!ip6mr_new_table(net, v))
1802 ret = -ENOMEM;
4861f799
SD
1803 else
1804 raw6_sk(sk)->ip6mr_table = v;
d1db275d
PM
1805 rtnl_unlock();
1806 return ret;
1807 }
14fb64e1 1808#endif
7bc570c8 1809 /*
7d120c55 1810 * Spurious command, or MRT6_VERSION which you cannot
7bc570c8
YH
1811 * set.
1812 */
1813 default:
1814 return -ENOPROTOOPT;
1815 }
1816}
1817
1818/*
1819 * Getsock opt support for the multicast routing system.
1820 */
1821
1822int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1823 int __user *optlen)
1824{
1825 int olr;
1826 int val;
8229efda 1827 struct net *net = sock_net(sk);
d1db275d
PM
1828 struct mr6_table *mrt;
1829
99253eb7
XL
1830 if (sk->sk_type != SOCK_RAW ||
1831 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1832 return -EOPNOTSUPP;
1833
d1db275d 1834 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
63159f29 1835 if (!mrt)
d1db275d 1836 return -ENOENT;
7bc570c8
YH
1837
1838 switch (optname) {
1839 case MRT6_VERSION:
1840 val = 0x0305;
1841 break;
14fb64e1
YH
1842#ifdef CONFIG_IPV6_PIMSM_V2
1843 case MRT6_PIM:
6bd52143 1844 val = mrt->mroute_do_pim;
14fb64e1
YH
1845 break;
1846#endif
1847 case MRT6_ASSERT:
6bd52143 1848 val = mrt->mroute_do_assert;
14fb64e1 1849 break;
7bc570c8
YH
1850 default:
1851 return -ENOPROTOOPT;
1852 }
1853
1854 if (get_user(olr, optlen))
1855 return -EFAULT;
1856
1857 olr = min_t(int, olr, sizeof(int));
1858 if (olr < 0)
1859 return -EINVAL;
1860
1861 if (put_user(olr, optlen))
1862 return -EFAULT;
1863 if (copy_to_user(optval, &val, olr))
1864 return -EFAULT;
1865 return 0;
1866}
1867
1868/*
1869 * The IP multicast ioctl support routines.
1870 */
1871
1872int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1873{
1874 struct sioc_sg_req6 sr;
1875 struct sioc_mif_req6 vr;
1876 struct mif_device *vif;
1877 struct mfc6_cache *c;
8229efda 1878 struct net *net = sock_net(sk);
d1db275d
PM
1879 struct mr6_table *mrt;
1880
1881 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
63159f29 1882 if (!mrt)
d1db275d 1883 return -ENOENT;
7bc570c8
YH
1884
1885 switch (cmd) {
1886 case SIOCGETMIFCNT_IN6:
1887 if (copy_from_user(&vr, arg, sizeof(vr)))
1888 return -EFAULT;
6bd52143 1889 if (vr.mifi >= mrt->maxvif)
7bc570c8 1890 return -EINVAL;
215c81c4 1891 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
7bc570c8 1892 read_lock(&mrt_lock);
6bd52143
PM
1893 vif = &mrt->vif6_table[vr.mifi];
1894 if (MIF_EXISTS(mrt, vr.mifi)) {
7bc570c8
YH
1895 vr.icount = vif->pkt_in;
1896 vr.ocount = vif->pkt_out;
1897 vr.ibytes = vif->bytes_in;
1898 vr.obytes = vif->bytes_out;
1899 read_unlock(&mrt_lock);
1900
1901 if (copy_to_user(arg, &vr, sizeof(vr)))
1902 return -EFAULT;
1903 return 0;
1904 }
1905 read_unlock(&mrt_lock);
1906 return -EADDRNOTAVAIL;
1907 case SIOCGETSGCNT_IN6:
1908 if (copy_from_user(&sr, arg, sizeof(sr)))
1909 return -EFAULT;
1910
1911 read_lock(&mrt_lock);
6bd52143 1912 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
7bc570c8
YH
1913 if (c) {
1914 sr.pktcnt = c->mfc_un.res.pkt;
1915 sr.bytecnt = c->mfc_un.res.bytes;
1916 sr.wrong_if = c->mfc_un.res.wrong_if;
1917 read_unlock(&mrt_lock);
1918
1919 if (copy_to_user(arg, &sr, sizeof(sr)))
1920 return -EFAULT;
1921 return 0;
1922 }
1923 read_unlock(&mrt_lock);
1924 return -EADDRNOTAVAIL;
1925 default:
1926 return -ENOIOCTLCMD;
1927 }
1928}
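
/*
 * Illustrative sketch, kept under "#if 0": querying the per-MIF counters
 * that the SIOCGETMIFCNT_IN6 branch above fills in.  The mif index passed
 * in is an assumption for illustration; error handling is omitted.
 */
#if 0
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/mroute6.h>

static void dump_mif_counters(int fd, mifi_t mifi)
{
	struct sioc_mif_req6 vr = { .mifi = mifi };

	if (ioctl(fd, SIOCGETMIFCNT_IN6, &vr) == 0)
		printf("mif %u: in %lu pkts / %lu bytes, out %lu pkts / %lu bytes\n",
		       (unsigned int)vr.mifi, vr.icount, vr.ibytes,
		       vr.ocount, vr.obytes);
}
#endif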
1929
e2d57766
DM
1930#ifdef CONFIG_COMPAT
1931struct compat_sioc_sg_req6 {
1932 struct sockaddr_in6 src;
1933 struct sockaddr_in6 grp;
1934 compat_ulong_t pktcnt;
1935 compat_ulong_t bytecnt;
1936 compat_ulong_t wrong_if;
1937};
1938
1939struct compat_sioc_mif_req6 {
1940 mifi_t mifi;
1941 compat_ulong_t icount;
1942 compat_ulong_t ocount;
1943 compat_ulong_t ibytes;
1944 compat_ulong_t obytes;
1945};
1946
1947int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1948{
1949 struct compat_sioc_sg_req6 sr;
1950 struct compat_sioc_mif_req6 vr;
1951 struct mif_device *vif;
1952 struct mfc6_cache *c;
1953 struct net *net = sock_net(sk);
1954 struct mr6_table *mrt;
1955
1956 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
63159f29 1957 if (!mrt)
e2d57766
DM
1958 return -ENOENT;
1959
1960 switch (cmd) {
1961 case SIOCGETMIFCNT_IN6:
1962 if (copy_from_user(&vr, arg, sizeof(vr)))
1963 return -EFAULT;
1964 if (vr.mifi >= mrt->maxvif)
1965 return -EINVAL;
215c81c4 1966 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
e2d57766
DM
1967 read_lock(&mrt_lock);
1968 vif = &mrt->vif6_table[vr.mifi];
1969 if (MIF_EXISTS(mrt, vr.mifi)) {
1970 vr.icount = vif->pkt_in;
1971 vr.ocount = vif->pkt_out;
1972 vr.ibytes = vif->bytes_in;
1973 vr.obytes = vif->bytes_out;
1974 read_unlock(&mrt_lock);
1975
1976 if (copy_to_user(arg, &vr, sizeof(vr)))
1977 return -EFAULT;
1978 return 0;
1979 }
1980 read_unlock(&mrt_lock);
1981 return -EADDRNOTAVAIL;
1982 case SIOCGETSGCNT_IN6:
1983 if (copy_from_user(&sr, arg, sizeof(sr)))
1984 return -EFAULT;
1985
1986 read_lock(&mrt_lock);
1987 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1988 if (c) {
1989 sr.pktcnt = c->mfc_un.res.pkt;
1990 sr.bytecnt = c->mfc_un.res.bytes;
1991 sr.wrong_if = c->mfc_un.res.wrong_if;
1992 read_unlock(&mrt_lock);
1993
1994 if (copy_to_user(arg, &sr, sizeof(sr)))
1995 return -EFAULT;
1996 return 0;
1997 }
1998 read_unlock(&mrt_lock);
1999 return -EADDRNOTAVAIL;
2000 default:
2001 return -ENOIOCTLCMD;
2002 }
2003}
2004#endif
7bc570c8 2005
0c4b51f0 2006static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
7bc570c8 2007{
1d015503
ED
2008 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
2009 IPSTATS_MIB_OUTFORWDATAGRAMS);
2010 __IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
2011 IPSTATS_MIB_OUTOCTETS, skb->len);
13206b6b 2012 return dst_output(net, sk, skb);
7bc570c8
YH
2013}
2014
2015/*
2016 * Processing handlers for ip6mr_forward
2017 */
2018
6bd52143
PM
2019static int ip6mr_forward2(struct net *net, struct mr6_table *mrt,
2020 struct sk_buff *skb, struct mfc6_cache *c, int vifi)
7bc570c8
YH
2021{
2022 struct ipv6hdr *ipv6h;
6bd52143 2023 struct mif_device *vif = &mrt->vif6_table[vifi];
7bc570c8
YH
2024 struct net_device *dev;
2025 struct dst_entry *dst;
4c9483b2 2026 struct flowi6 fl6;
7bc570c8 2027
63159f29 2028 if (!vif->dev)
7bc570c8
YH
2029 goto out_free;
2030
14fb64e1
YH
2031#ifdef CONFIG_IPV6_PIMSM_V2
2032 if (vif->flags & MIFF_REGISTER) {
2033 vif->pkt_out++;
2034 vif->bytes_out += skb->len;
dc58c78c
PE
2035 vif->dev->stats.tx_bytes += skb->len;
2036 vif->dev->stats.tx_packets++;
6bd52143 2037 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
8da73b73 2038 goto out_free;
14fb64e1
YH
2039 }
2040#endif
2041
7bc570c8
YH
2042 ipv6h = ipv6_hdr(skb);
2043
4c9483b2
DM
2044 fl6 = (struct flowi6) {
2045 .flowi6_oif = vif->link,
2046 .daddr = ipv6h->daddr,
7bc570c8
YH
2047 };
2048
4c9483b2 2049 dst = ip6_route_output(net, NULL, &fl6);
5095d64d
RL
2050 if (dst->error) {
2051 dst_release(dst);
7bc570c8 2052 goto out_free;
5095d64d 2053 }
7bc570c8 2054
adf30907
ED
2055 skb_dst_drop(skb);
2056 skb_dst_set(skb, dst);
7bc570c8
YH
2057
2058 /*
2059	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
2060	 * locally not only before forwarding, but also after forwarding on
2061	 * all output interfaces. Clearly, if the mrouter runs a multicast
2062	 * application, that application should receive the packets regardless
2063	 * of which interface it joined on.
2064	 * If we did not do this, the application would have to join on all
2065	 * interfaces. On the other hand, a multihomed host (or router, but
2066	 * not an mrouter) cannot join on more than one interface - it would
2067	 * receive duplicate packets.
2068 */
2069 dev = vif->dev;
2070 skb->dev = dev;
2071 vif->pkt_out++;
2072 vif->bytes_out += skb->len;
2073
2074 /* We are about to write */
2075 /* XXX: extension headers? */
2076 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2077 goto out_free;
2078
2079 ipv6h = ipv6_hdr(skb);
2080 ipv6h->hop_limit--;
2081
2082 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2083
29a26a56
EB
2084 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2085 net, NULL, skb, skb->dev, dev,
7bc570c8
YH
2086 ip6mr_forward2_finish);
2087
2088out_free:
2089 kfree_skb(skb);
2090 return 0;
2091}
2092
6bd52143 2093static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
7bc570c8
YH
2094{
2095 int ct;
6bd52143
PM
2096
2097 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2098 if (mrt->vif6_table[ct].dev == dev)
7bc570c8
YH
2099 break;
2100 }
2101 return ct;
2102}
2103
2b52c3ad
RR
2104static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
2105 struct sk_buff *skb, struct mfc6_cache *cache)
7bc570c8
YH
2106{
2107 int psend = -1;
2108 int vif, ct;
660b26dc 2109 int true_vifi = ip6mr_find_vif(mrt, skb->dev);
7bc570c8
YH
2110
2111 vif = cache->mf6c_parent;
2112 cache->mfc_un.res.pkt++;
2113 cache->mfc_un.res.bytes += skb->len;
43b9e127 2114 cache->mfc_un.res.lastuse = jiffies;
7bc570c8 2115
660b26dc
ND
2116 if (ipv6_addr_any(&cache->mf6c_origin) && true_vifi >= 0) {
2117 struct mfc6_cache *cache_proxy;
2118
40dc2ca3 2119 /* For an (*,G) entry, we only check that the incoming
660b26dc
ND
2120 * interface is part of the static tree.
2121 */
2122 cache_proxy = ip6mr_cache_find_any_parent(mrt, vif);
2123 if (cache_proxy &&
2124 cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
2125 goto forward;
2126 }
2127
14fb64e1
YH
2128 /*
2129 * Wrong interface: drop packet and (maybe) send PIM assert.
2130 */
6bd52143 2131 if (mrt->vif6_table[vif].dev != skb->dev) {
14fb64e1 2132 cache->mfc_un.res.wrong_if++;
14fb64e1 2133
6bd52143 2134 if (true_vifi >= 0 && mrt->mroute_do_assert &&
14fb64e1
YH
2135		    /* PIM-SM uses asserts when switching from the RPT to the SPT,
2136		       so we cannot check that the packet arrived on an oif.
2137		       That is unfortunate, but otherwise we would have to move a
2138		       fairly large chunk of pimd into the kernel. Ough... --ANK
2139 */
6bd52143 2140 (mrt->mroute_do_pim ||
a21f3f99 2141 cache->mfc_un.res.ttls[true_vifi] < 255) &&
14fb64e1
YH
2142 time_after(jiffies,
2143 cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
2144 cache->mfc_un.res.last_assert = jiffies;
6bd52143 2145 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
14fb64e1
YH
2146 }
2147 goto dont_forward;
2148 }
2149
660b26dc 2150forward:
6bd52143
PM
2151 mrt->vif6_table[vif].pkt_in++;
2152 mrt->vif6_table[vif].bytes_in += skb->len;
7bc570c8
YH
2153
2154 /*
2155 * Forward the frame
2156 */
660b26dc
ND
2157 if (ipv6_addr_any(&cache->mf6c_origin) &&
2158 ipv6_addr_any(&cache->mf6c_mcastgrp)) {
2159 if (true_vifi >= 0 &&
2160 true_vifi != cache->mf6c_parent &&
2161 ipv6_hdr(skb)->hop_limit >
2162 cache->mfc_un.res.ttls[cache->mf6c_parent]) {
2163 /* It's an (*,*) entry and the packet is not coming from
2164 * the upstream: forward the packet to the upstream
2165 * only.
2166 */
2167 psend = cache->mf6c_parent;
2168 goto last_forward;
2169 }
2170 goto dont_forward;
2171 }
7bc570c8 2172 for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
660b26dc
ND
2173 /* For (*,G) entry, don't forward to the incoming interface */
2174 if ((!ipv6_addr_any(&cache->mf6c_origin) || ct != true_vifi) &&
2175 ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
7bc570c8
YH
2176 if (psend != -1) {
2177 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2178 if (skb2)
6bd52143 2179 ip6mr_forward2(net, mrt, skb2, cache, psend);
7bc570c8
YH
2180 }
2181 psend = ct;
2182 }
2183 }
660b26dc 2184last_forward:
7bc570c8 2185 if (psend != -1) {
6bd52143 2186 ip6mr_forward2(net, mrt, skb, cache, psend);
2b52c3ad 2187 return;
7bc570c8
YH
2188 }
2189
14fb64e1 2190dont_forward:
7bc570c8 2191 kfree_skb(skb);
7bc570c8
YH
2192}
2193
2194
2195/*
2196 * Multicast packets for forwarding arrive here
2197 */
2198
2199int ip6_mr_input(struct sk_buff *skb)
2200{
2201 struct mfc6_cache *cache;
8229efda 2202 struct net *net = dev_net(skb->dev);
d1db275d 2203 struct mr6_table *mrt;
4c9483b2
DM
2204 struct flowi6 fl6 = {
2205 .flowi6_iif = skb->dev->ifindex,
2206 .flowi6_mark = skb->mark,
d1db275d
PM
2207 };
2208 int err;
2209
4c9483b2 2210 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2015de5f
BG
2211 if (err < 0) {
2212 kfree_skb(skb);
d1db275d 2213 return err;
2015de5f 2214 }
7bc570c8
YH
2215
2216 read_lock(&mrt_lock);
6bd52143 2217 cache = ip6mr_cache_find(mrt,
8229efda 2218 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
63159f29 2219 if (!cache) {
660b26dc
ND
2220 int vif = ip6mr_find_vif(mrt, skb->dev);
2221
2222 if (vif >= 0)
2223 cache = ip6mr_cache_find_any(mrt,
2224 &ipv6_hdr(skb)->daddr,
2225 vif);
2226 }
7bc570c8
YH
2227
2228 /*
2229 * No usable cache entry
2230 */
63159f29 2231 if (!cache) {
7bc570c8
YH
2232 int vif;
2233
6bd52143 2234 vif = ip6mr_find_vif(mrt, skb->dev);
7bc570c8 2235 if (vif >= 0) {
6bd52143 2236 int err = ip6mr_cache_unresolved(mrt, vif, skb);
7bc570c8
YH
2237 read_unlock(&mrt_lock);
2238
2239 return err;
2240 }
2241 read_unlock(&mrt_lock);
2242 kfree_skb(skb);
2243 return -ENODEV;
2244 }
2245
6bd52143 2246 ip6_mr_forward(net, mrt, skb, cache);
7bc570c8
YH
2247
2248 read_unlock(&mrt_lock);
2249
2250 return 0;
2251}
2252
2253
5b285cac
PM
2254static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2255 struct mfc6_cache *c, struct rtmsg *rtm)
7bc570c8 2256{
adfa85e4 2257 struct rta_mfc_stats mfcs;
43b9e127
NA
2258 struct nlattr *mp_attr;
2259 struct rtnexthop *nhp;
b5036cd4 2260 unsigned long lastuse;
43b9e127 2261 int ct;
7bc570c8 2262
7438189b 2263 /* If cache is unresolved, don't try to parse IIF and OIF */
1708ebc9
NA
2264 if (c->mf6c_parent >= MAXMIFS) {
2265 rtm->rtm_flags |= RTNH_F_UNRESOLVED;
7438189b 2266 return -ENOENT;
1708ebc9 2267 }
7438189b 2268
74a0bd7d
TG
2269 if (MIF_EXISTS(mrt, c->mf6c_parent) &&
2270 nla_put_u32(skb, RTA_IIF, mrt->vif6_table[c->mf6c_parent].dev->ifindex) < 0)
2271 return -EMSGSIZE;
70b386a0 2272 mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
63159f29 2273 if (!mp_attr)
70b386a0 2274 return -EMSGSIZE;
7bc570c8
YH
2275
2276 for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
6bd52143 2277 if (MIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
70b386a0 2278 nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
63159f29 2279 if (!nhp) {
70b386a0
ND
2280 nla_nest_cancel(skb, mp_attr);
2281 return -EMSGSIZE;
2282 }
2283
7bc570c8
YH
2284 nhp->rtnh_flags = 0;
2285 nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
6bd52143 2286 nhp->rtnh_ifindex = mrt->vif6_table[ct].dev->ifindex;
7bc570c8
YH
2287 nhp->rtnh_len = sizeof(*nhp);
2288 }
2289 }
70b386a0
ND
2290
2291 nla_nest_end(skb, mp_attr);
2292
b5036cd4
NA
2293 lastuse = READ_ONCE(c->mfc_un.res.lastuse);
2294 lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;
2295
adfa85e4
ND
2296 mfcs.mfcs_packets = c->mfc_un.res.pkt;
2297 mfcs.mfcs_bytes = c->mfc_un.res.bytes;
2298 mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
43b9e127 2299 if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
b5036cd4 2300 nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
43b9e127 2301 RTA_PAD))
adfa85e4
ND
2302 return -EMSGSIZE;
2303
7bc570c8
YH
2304 rtm->rtm_type = RTN_MULTICAST;
2305 return 1;
7bc570c8
YH
2306}
2307
2cf75070 2308int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
fd61c6ba 2309 u32 portid)
7bc570c8
YH
2310{
2311 int err;
d1db275d 2312 struct mr6_table *mrt;
7bc570c8 2313 struct mfc6_cache *cache;
adf30907 2314 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
7bc570c8 2315
d1db275d 2316 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
63159f29 2317 if (!mrt)
d1db275d
PM
2318 return -ENOENT;
2319
7bc570c8 2320 read_lock(&mrt_lock);
6bd52143 2321 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
660b26dc
ND
2322 if (!cache && skb->dev) {
2323 int vif = ip6mr_find_vif(mrt, skb->dev);
2324
2325 if (vif >= 0)
2326 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2327 vif);
2328 }
7bc570c8
YH
2329
2330 if (!cache) {
2331 struct sk_buff *skb2;
2332 struct ipv6hdr *iph;
2333 struct net_device *dev;
2334 int vif;
2335
7bc570c8 2336 dev = skb->dev;
63159f29 2337 if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
7bc570c8
YH
2338 read_unlock(&mrt_lock);
2339 return -ENODEV;
2340 }
2341
2342 /* really correct? */
2343 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2344 if (!skb2) {
2345 read_unlock(&mrt_lock);
2346 return -ENOMEM;
2347 }
2348
2cf75070 2349 NETLINK_CB(skb2).portid = portid;
7bc570c8
YH
2350 skb_reset_transport_header(skb2);
2351
2352 skb_put(skb2, sizeof(struct ipv6hdr));
2353 skb_reset_network_header(skb2);
2354
2355 iph = ipv6_hdr(skb2);
2356 iph->version = 0;
2357 iph->priority = 0;
2358 iph->flow_lbl[0] = 0;
2359 iph->flow_lbl[1] = 0;
2360 iph->flow_lbl[2] = 0;
2361 iph->payload_len = 0;
2362 iph->nexthdr = IPPROTO_NONE;
2363 iph->hop_limit = 0;
4e3fd7a0
AD
2364 iph->saddr = rt->rt6i_src.addr;
2365 iph->daddr = rt->rt6i_dst.addr;
7bc570c8 2366
6bd52143 2367 err = ip6mr_cache_unresolved(mrt, vif, skb2);
7bc570c8
YH
2368 read_unlock(&mrt_lock);
2369
2370 return err;
2371 }
2372
fd61c6ba 2373 if (rtm->rtm_flags & RTM_F_NOTIFY)
7bc570c8
YH
2374 cache->mfc_flags |= MFC_NOTIFY;
2375
5b285cac 2376 err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
7bc570c8
YH
2377 read_unlock(&mrt_lock);
2378 return err;
2379}
2380
5b285cac 2381static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
f518338b
ND
2382 u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2383 int flags)
5b285cac
PM
2384{
2385 struct nlmsghdr *nlh;
2386 struct rtmsg *rtm;
1eb99af5 2387 int err;
5b285cac 2388
f518338b 2389 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
63159f29 2390 if (!nlh)
5b285cac
PM
2391 return -EMSGSIZE;
2392
2393 rtm = nlmsg_data(nlh);
193c1e47 2394 rtm->rtm_family = RTNL_FAMILY_IP6MR;
5b285cac
PM
2395 rtm->rtm_dst_len = 128;
2396 rtm->rtm_src_len = 128;
2397 rtm->rtm_tos = 0;
2398 rtm->rtm_table = mrt->id;
c78679e8
DM
2399 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2400 goto nla_put_failure;
1eb99af5 2401 rtm->rtm_type = RTN_MULTICAST;
5b285cac 2402 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
9a68ac72
ND
2403 if (c->mfc_flags & MFC_STATIC)
2404 rtm->rtm_protocol = RTPROT_STATIC;
2405 else
2406 rtm->rtm_protocol = RTPROT_MROUTED;
5b285cac
PM
2407 rtm->rtm_flags = 0;
2408
930345ea
JB
2409 if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2410 nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
c78679e8 2411 goto nla_put_failure;
1eb99af5
ND
2412 err = __ip6mr_fill_mroute(mrt, skb, c, rtm);
2413 /* do not break the dump if cache is unresolved */
2414 if (err < 0 && err != -ENOENT)
5b285cac
PM
2415 goto nla_put_failure;
2416
053c095a
JB
2417 nlmsg_end(skb, nlh);
2418 return 0;
5b285cac
PM
2419
2420nla_put_failure:
2421 nlmsg_cancel(skb, nlh);
2422 return -EMSGSIZE;
2423}
2424
812e44dd
ND
2425static int mr6_msgsize(bool unresolved, int maxvif)
2426{
2427 size_t len =
2428 NLMSG_ALIGN(sizeof(struct rtmsg))
2429 + nla_total_size(4) /* RTA_TABLE */
2430 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
2431 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
2432 ;
2433
2434 if (!unresolved)
2435 len = len
2436 + nla_total_size(4) /* RTA_IIF */
2437 + nla_total_size(0) /* RTA_MULTIPATH */
2438 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2439 /* RTA_MFC_STATS */
3d6b66c1 2440 + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
812e44dd
ND
2441 ;
2442
2443 return len;
2444}
2445
2446static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
2447 int cmd)
2448{
2449 struct net *net = read_pnet(&mrt->net);
2450 struct sk_buff *skb;
2451 int err = -ENOBUFS;
2452
2453 skb = nlmsg_new(mr6_msgsize(mfc->mf6c_parent >= MAXMIFS, mrt->maxvif),
2454 GFP_ATOMIC);
63159f29 2455 if (!skb)
812e44dd
ND
2456 goto errout;
2457
f518338b 2458 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
812e44dd
ND
2459 if (err < 0)
2460 goto errout;
2461
2462 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2463 return;
2464
2465errout:
2466 kfree_skb(skb);
2467 if (err < 0)
2468 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2469}
2470
dd12d15c
JG
2471static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2472{
2473 size_t len =
2474 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2475 + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
2476 + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
2477 /* IP6MRA_CREPORT_SRC_ADDR */
2478 + nla_total_size(sizeof(struct in6_addr))
2479 /* IP6MRA_CREPORT_DST_ADDR */
2480 + nla_total_size(sizeof(struct in6_addr))
2481 /* IP6MRA_CREPORT_PKT */
2482 + nla_total_size(payloadlen)
2483 ;
2484
2485 return len;
2486}
2487
2488static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt)
2489{
2490 struct net *net = read_pnet(&mrt->net);
2491 struct nlmsghdr *nlh;
2492 struct rtgenmsg *rtgenm;
2493 struct mrt6msg *msg;
2494 struct sk_buff *skb;
2495 struct nlattr *nla;
2496 int payloadlen;
2497
2498 payloadlen = pkt->len - sizeof(struct mrt6msg);
2499 msg = (struct mrt6msg *)skb_transport_header(pkt);
2500
2501 skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2502 if (!skb)
2503 goto errout;
2504
2505 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2506 sizeof(struct rtgenmsg), 0);
2507 if (!nlh)
2508 goto errout;
2509 rtgenm = nlmsg_data(nlh);
2510 rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2511 if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2512 nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2513 nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2514 &msg->im6_src) ||
2515 nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2516 &msg->im6_dst))
2517 goto nla_put_failure;
2518
2519 nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2520 if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2521 nla_data(nla), payloadlen))
2522 goto nla_put_failure;
2523
2524 nlmsg_end(skb, nlh);
2525
2526 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2527 return;
2528
2529nla_put_failure:
2530 nlmsg_cancel(skb, nlh);
2531errout:
2532 kfree_skb(skb);
2533 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2534}
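
/*
 * Illustrative sketch, kept under "#if 0": a userspace listener for the
 * RTM_NEWCACHEREPORT notifications generated by mrt6msg_netlink_event()
 * above.  It joins RTNLGRP_IPV6_MROUTE_R and walks the received messages;
 * attribute parsing, buffer sizing and error handling are simplified.
 */
#if 0
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/mroute6.h>

static void creport_listen(void)
{
	struct sockaddr_nl snl = {
		.nl_family = AF_NETLINK,
		/* RTNLGRP_IPV6_MROUTE_R still fits in the 32-bit group mask */
		.nl_groups = 1U << (RTNLGRP_IPV6_MROUTE_R - 1),
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	char buf[8192];

	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf), 0);
		struct nlmsghdr *nlh = (struct nlmsghdr *)buf;

		if (len <= 0)
			break;
		for (; NLMSG_OK(nlh, len); nlh = NLMSG_NEXT(nlh, len)) {
			if (nlh->nlmsg_type != RTM_NEWCACHEREPORT)
				continue;
			/* IP6MRA_CREPORT_* attributes follow the
			 * struct rtgenmsg header; parse them here. */
		}
	}
}
#endif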
2535
5b285cac
PM
2536static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2537{
2538 struct net *net = sock_net(skb->sk);
2539 struct mr6_table *mrt;
2540 struct mfc6_cache *mfc;
2541 unsigned int t = 0, s_t;
2542 unsigned int h = 0, s_h;
2543 unsigned int e = 0, s_e;
2544
2545 s_t = cb->args[0];
2546 s_h = cb->args[1];
2547 s_e = cb->args[2];
2548
2549 read_lock(&mrt_lock);
2550 ip6mr_for_each_table(mrt, net) {
2551 if (t < s_t)
2552 goto next_table;
2553 if (t > s_t)
2554 s_h = 0;
2555 for (h = s_h; h < MFC6_LINES; h++) {
2556 list_for_each_entry(mfc, &mrt->mfc6_cache_array[h], list) {
2557 if (e < s_e)
2558 goto next_entry;
2559 if (ip6mr_fill_mroute(mrt, skb,
15e47304 2560 NETLINK_CB(cb->skb).portid,
5b285cac 2561 cb->nlh->nlmsg_seq,
f518338b
ND
2562 mfc, RTM_NEWROUTE,
2563 NLM_F_MULTI) < 0)
5b285cac
PM
2564 goto done;
2565next_entry:
2566 e++;
2567 }
2568 e = s_e = 0;
2569 }
1eb99af5
ND
2570 spin_lock_bh(&mfc_unres_lock);
2571 list_for_each_entry(mfc, &mrt->mfc6_unres_queue, list) {
2572 if (e < s_e)
2573 goto next_entry2;
2574 if (ip6mr_fill_mroute(mrt, skb,
2575 NETLINK_CB(cb->skb).portid,
2576 cb->nlh->nlmsg_seq,
f518338b
ND
2577 mfc, RTM_NEWROUTE,
2578 NLM_F_MULTI) < 0) {
1eb99af5
ND
2579 spin_unlock_bh(&mfc_unres_lock);
2580 goto done;
2581 }
2582next_entry2:
2583 e++;
2584 }
2585 spin_unlock_bh(&mfc_unres_lock);
2586 e = s_e = 0;
5b285cac
PM
2587 s_h = 0;
2588next_table:
2589 t++;
2590 }
2591done:
2592 read_unlock(&mrt_lock);
2593
2594 cb->args[2] = e;
2595 cb->args[1] = h;
2596 cb->args[0] = t;
2597
2598 return skb->len;
2599}
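
/*
 * Illustrative sketch, kept under "#if 0": issuing the dump that
 * ip6mr_rtm_dumproute() above serves, which is essentially what
 * "ip -6 mroute show" sends.  The fd is assumed to be an open
 * NETLINK_ROUTE socket; error handling is omitted.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static void request_ip6mr_dump(int fd)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	req.rtm.rtm_family = RTNL_FAMILY_IP6MR;

	send(fd, &req, req.nlh.nlmsg_len, 0);
	/* Replies come back as RTM_NEWROUTE messages built by
	 * ip6mr_fill_mroute(), one per multicast cache entry. */
}
#endif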