// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/rhashtable.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
#include <net/ip_tunnels.h>

#include <linux/nospec.h>

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr_table	*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv6.mr6_tables)
		return NULL;
	return ret;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

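/* Resolve which multicast routing table should handle this flow by
 * running the IP6MR fib rules; on success *mrt points at the matched table.
 */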
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi6_to_flowi(flp6));

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

151static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
152 int flags, struct fib_lookup_arg *arg)
153{
154 struct ip6mr_result *res = arg->result;
b70432f7 155 struct mr_table *mrt;
d1db275d
PM
156
157 switch (rule->action) {
158 case FR_ACT_TO_TBL:
159 break;
160 case FR_ACT_UNREACHABLE:
161 return -ENETUNREACH;
162 case FR_ACT_PROHIBIT:
163 return -EACCES;
164 case FR_ACT_BLACKHOLE:
165 default:
166 return -EINVAL;
167 }
168
e4a38c0c
PR
169 arg->table = fib_rule_get_table(rule, arg);
170
171 mrt = ip6mr_get_table(rule->fr_net, arg->table);
63159f29 172 if (!mrt)
d1db275d
PM
173 return -EAGAIN;
174 res->mrt = mrt;
175 return 0;
176}
177
178static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
179{
180 return 1;
181}
182
183static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
184 FRA_GENERIC_POLICY,
185};
186
187static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
b16fb418
RP
188 struct fib_rule_hdr *frh, struct nlattr **tb,
189 struct netlink_ext_ack *extack)
d1db275d
PM
190{
191 return 0;
192}
193
194static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
195 struct nlattr **tb)
196{
197 return 1;
198}
199
200static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
201 struct fib_rule_hdr *frh)
202{
203 frh->dst_len = 0;
204 frh->src_len = 0;
205 frh->tos = 0;
206 return 0;
207}
208
04a6f82c 209static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
d1db275d
PM
210 .family = RTNL_FAMILY_IP6MR,
211 .rule_size = sizeof(struct ip6mr_rule),
212 .addr_size = sizeof(struct in6_addr),
213 .action = ip6mr_rule_action,
214 .match = ip6mr_rule_match,
215 .configure = ip6mr_rule_configure,
216 .compare = ip6mr_rule_compare,
d1db275d
PM
217 .fill = ip6mr_rule_fill,
218 .nlgroup = RTNLGRP_IPV6_RULE,
219 .policy = ip6mr_rule_policy,
220 .owner = THIS_MODULE,
221};
222
223static int __net_init ip6mr_rules_init(struct net *net)
224{
225 struct fib_rules_ops *ops;
b70432f7 226 struct mr_table *mrt;
d1db275d
PM
227 int err;
228
229 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
230 if (IS_ERR(ops))
231 return PTR_ERR(ops);
232
233 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
234
235 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
e783bb00
SD
236 if (IS_ERR(mrt)) {
237 err = PTR_ERR(mrt);
d1db275d
PM
238 goto err1;
239 }
240
241 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
242 if (err < 0)
243 goto err2;
244
245 net->ipv6.mr6_rules_ops = ops;
246 return 0;
247
248err2:
f243e5a7 249 ip6mr_free_table(mrt);
d1db275d
PM
250err1:
251 fib_rules_unregister(ops);
252 return err;
253}
254
255static void __net_exit ip6mr_rules_exit(struct net *net)
256{
b70432f7 257 struct mr_table *mrt, *next;
d1db275d 258
905a6f96 259 rtnl_lock();
035320d5
ED
260 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
261 list_del(&mrt->list);
d1db275d 262 ip6mr_free_table(mrt);
035320d5 263 }
d1db275d 264 fib_rules_unregister(net->ipv6.mr6_rules_ops);
419df12f 265 rtnl_unlock();
d1db275d 266}
088aa3ee 267
b7a59557
JP
268static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
269 struct netlink_ext_ack *extack)
088aa3ee 270{
b7a59557 271 return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
088aa3ee
YM
272}
273
274static unsigned int ip6mr_rules_seq_read(struct net *net)
275{
276 return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
277}
d3c07e5b
YM
278
279bool ip6mr_rule_default(const struct fib_rule *rule)
280{
281 return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
282 rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
283}
284EXPORT_SYMBOL(ip6mr_rule_default);
d1db275d
PM
285#else
286#define ip6mr_for_each_table(mrt, net) \
287 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
288
7b0db857
YM
289static struct mr_table *ip6mr_mr_table_iter(struct net *net,
290 struct mr_table *mrt)
291{
292 if (!mrt)
293 return net->ipv6.mrt6;
294 return NULL;
295}
296
b70432f7 297static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
d1db275d
PM
298{
299 return net->ipv6.mrt6;
300}
301
4c9483b2 302static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
b70432f7 303 struct mr_table **mrt)
d1db275d
PM
304{
305 *mrt = net->ipv6.mrt6;
306 return 0;
307}
308
309static int __net_init ip6mr_rules_init(struct net *net)
310{
e783bb00
SD
311 struct mr_table *mrt;
312
313 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
314 if (IS_ERR(mrt))
315 return PTR_ERR(mrt);
316 net->ipv6.mrt6 = mrt;
317 return 0;
d1db275d
PM
318}
319
320static void __net_exit ip6mr_rules_exit(struct net *net)
321{
905a6f96 322 rtnl_lock();
d1db275d 323 ip6mr_free_table(net->ipv6.mrt6);
905a6f96
HFS
324 net->ipv6.mrt6 = NULL;
325 rtnl_unlock();
d1db275d 326}
088aa3ee 327
b7a59557
JP
328static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
329 struct netlink_ext_ack *extack)
088aa3ee
YM
330{
331 return 0;
332}
333
334static unsigned int ip6mr_rules_seq_read(struct net *net)
335{
336 return 0;
337}
338#endif
339
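/* MFC entries live in an rhashtable keyed on the (origin, multicast group)
 * address pair; the compare callback below returns non-zero when an entry
 * does not match the lookup key.
 */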
340static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
341 const void *ptr)
342{
343 const struct mfc6_cache_cmp_arg *cmparg = arg->key;
344 struct mfc6_cache *c = (struct mfc6_cache *)ptr;
345
346 return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
347 !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
348}
349
350static const struct rhashtable_params ip6mr_rht_params = {
494fff56 351 .head_offset = offsetof(struct mr_mfc, mnode),
87c418bf
YM
352 .key_offset = offsetof(struct mfc6_cache, cmparg),
353 .key_len = sizeof(struct mfc6_cache_cmp_arg),
354 .nelem_hint = 3,
87c418bf
YM
355 .obj_cmpfn = ip6mr_hash_cmp,
356 .automatic_shrinking = true,
357};
358
0bbbf0e7
YM
359static void ip6mr_new_table_set(struct mr_table *mrt,
360 struct net *net)
361{
362#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
363 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
364#endif
365}
366
845c9a7a
YM
367static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
368 .mf6c_origin = IN6ADDR_ANY_INIT,
369 .mf6c_mcastgrp = IN6ADDR_ANY_INIT,
370};
371
372static struct mr_table_ops ip6mr_mr_table_ops = {
373 .rht_params = &ip6mr_rht_params,
374 .cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
375};
376
b70432f7 377static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
d1db275d 378{
b70432f7 379 struct mr_table *mrt;
d1db275d
PM
380
381 mrt = ip6mr_get_table(net, id);
53b24b8f 382 if (mrt)
d1db275d
PM
383 return mrt;
384
845c9a7a 385 return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
0bbbf0e7 386 ipmr_expire_process, ip6mr_new_table_set);
d1db275d 387}
7bc570c8 388
b70432f7 389static void ip6mr_free_table(struct mr_table *mrt)
d1db275d 390{
7ba0c47c 391 del_timer_sync(&mrt->ipmr_expire_timer);
ca8d4794
CS
392 mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
393 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
b70432f7 394 rhltable_destroy(&mrt->mfc_hash);
d1db275d
PM
395 kfree(mrt);
396}
7bc570c8
YH
397
398#ifdef CONFIG_PROC_FS
c8d61968
YM
399/* The /proc interfaces to multicast routing
400 * /proc/ip6_mr_cache /proc/ip6_mr_vif
7bc570c8
YH
401 */
402
7bc570c8
YH
403static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
404 __acquires(mrt_lock)
405{
3feda6b4 406 struct mr_vif_iter *iter = seq->private;
8b90fc7e 407 struct net *net = seq_file_net(seq);
b70432f7 408 struct mr_table *mrt;
d1db275d
PM
409
410 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
63159f29 411 if (!mrt)
d1db275d
PM
412 return ERR_PTR(-ENOENT);
413
414 iter->mrt = mrt;
8b90fc7e 415
7bc570c8 416 read_lock(&mrt_lock);
3feda6b4 417 return mr_vif_seq_start(seq, pos);
7bc570c8
YH
418}
419
420static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
421 __releases(mrt_lock)
422{
423 read_unlock(&mrt_lock);
424}
425
426static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
427{
3feda6b4 428 struct mr_vif_iter *iter = seq->private;
b70432f7 429 struct mr_table *mrt = iter->mrt;
8b90fc7e 430
7bc570c8
YH
431 if (v == SEQ_START_TOKEN) {
432 seq_puts(seq,
433 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
434 } else {
6853f21f 435 const struct vif_device *vif = v;
7bc570c8
YH
436 const char *name = vif->dev ? vif->dev->name : "none";
437
438 seq_printf(seq,
d430a227 439 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
b70432f7 440 vif - mrt->vif_table,
7bc570c8
YH
441 name, vif->bytes_in, vif->pkt_in,
442 vif->bytes_out, vif->pkt_out,
443 vif->flags);
444 }
445 return 0;
446}
447
98147d52 448static const struct seq_operations ip6mr_vif_seq_ops = {
7bc570c8 449 .start = ip6mr_vif_seq_start,
3feda6b4 450 .next = mr_vif_seq_next,
7bc570c8
YH
451 .stop = ip6mr_vif_seq_stop,
452 .show = ip6mr_vif_seq_show,
453};
454
7bc570c8
YH
455static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
456{
8b90fc7e 457 struct net *net = seq_file_net(seq);
b70432f7 458 struct mr_table *mrt;
8b90fc7e 459
d1db275d 460 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
63159f29 461 if (!mrt)
d1db275d
PM
462 return ERR_PTR(-ENOENT);
463
c8d61968 464 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
7bc570c8
YH
465}
466
467static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
468{
469 int n;
470
471 if (v == SEQ_START_TOKEN) {
472 seq_puts(seq,
473 "Group "
474 "Origin "
475 "Iif Pkts Bytes Wrong Oifs\n");
476 } else {
477 const struct mfc6_cache *mfc = v;
c8d61968 478 const struct mr_mfc_iter *it = seq->private;
b70432f7 479 struct mr_table *mrt = it->mrt;
7bc570c8 480
999890b2 481 seq_printf(seq, "%pI6 %pI6 %-3hd",
0c6ce78a 482 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
494fff56 483 mfc->_c.mfc_parent);
7bc570c8 484
b70432f7 485 if (it->cache != &mrt->mfc_unres_queue) {
1ea472e2 486 seq_printf(seq, " %8lu %8lu %8lu",
494fff56
YM
487 mfc->_c.mfc_un.res.pkt,
488 mfc->_c.mfc_un.res.bytes,
489 mfc->_c.mfc_un.res.wrong_if);
490 for (n = mfc->_c.mfc_un.res.minvif;
491 n < mfc->_c.mfc_un.res.maxvif; n++) {
b70432f7 492 if (VIF_EXISTS(mrt, n) &&
494fff56 493 mfc->_c.mfc_un.res.ttls[n] < 255)
7bc570c8 494 seq_printf(seq,
494fff56
YM
495 " %2d:%-3d", n,
496 mfc->_c.mfc_un.res.ttls[n]);
7bc570c8 497 }
1ea472e2
BT
498 } else {
499 /* unresolved mfc_caches don't contain
500 * pkt, bytes and wrong_if values
501 */
502 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
7bc570c8
YH
503 }
504 seq_putc(seq, '\n');
505 }
506 return 0;
507}
508
88e9d34c 509static const struct seq_operations ipmr_mfc_seq_ops = {
7bc570c8 510 .start = ipmr_mfc_seq_start,
c8d61968
YM
511 .next = mr_mfc_seq_next,
512 .stop = mr_mfc_seq_stop,
7bc570c8
YH
513 .show = ipmr_mfc_seq_show,
514};
515#endif
516
14fb64e1 517#ifdef CONFIG_IPV6_PIMSM_V2
14fb64e1
YH
518
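/* Receive handler for IPPROTO_PIM: validate the PIM REGISTER header and
 * checksum, check that the encapsulated packet really is IPv6 multicast,
 * then strip the outer headers and reinject the inner packet on the
 * pim6reg device.
 */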
519static int pim6_rcv(struct sk_buff *skb)
520{
521 struct pimreghdr *pim;
522 struct ipv6hdr *encap;
523 struct net_device *reg_dev = NULL;
8229efda 524 struct net *net = dev_net(skb->dev);
b70432f7 525 struct mr_table *mrt;
4c9483b2
DM
526 struct flowi6 fl6 = {
527 .flowi6_iif = skb->dev->ifindex,
528 .flowi6_mark = skb->mark,
d1db275d
PM
529 };
530 int reg_vif_num;
14fb64e1
YH
531
532 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
533 goto drop;
534
535 pim = (struct pimreghdr *)skb_transport_header(skb);
56245cae 536 if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
14fb64e1 537 (pim->flags & PIM_NULL_REGISTER) ||
1d6e55f1
TG
538 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
539 sizeof(*pim), IPPROTO_PIM,
540 csum_partial((void *)pim, sizeof(*pim), 0)) &&
ec6b486f 541 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
14fb64e1
YH
542 goto drop;
543
544 /* check if the inner packet is destined to mcast group */
545 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
546 sizeof(*pim));
547
548 if (!ipv6_addr_is_multicast(&encap->daddr) ||
549 encap->payload_len == 0 ||
550 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
551 goto drop;
552
4c9483b2 553 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
d1db275d
PM
554 goto drop;
555 reg_vif_num = mrt->mroute_reg_vif_num;
556
14fb64e1
YH
557 read_lock(&mrt_lock);
558 if (reg_vif_num >= 0)
b70432f7 559 reg_dev = mrt->vif_table[reg_vif_num].dev;
14fb64e1
YH
560 if (reg_dev)
561 dev_hold(reg_dev);
562 read_unlock(&mrt_lock);
563
63159f29 564 if (!reg_dev)
14fb64e1
YH
565 goto drop;
566
567 skb->mac_header = skb->network_header;
568 skb_pull(skb, (u8 *)encap - skb->data);
569 skb_reset_network_header(skb);
1d6e55f1 570 skb->protocol = htons(ETH_P_IPV6);
3e49e6d5 571 skb->ip_summed = CHECKSUM_NONE;
d19d56dd 572
ea23192e 573 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
d19d56dd 574
caf586e5 575 netif_rx(skb);
8990f468 576
14fb64e1
YH
577 dev_put(reg_dev);
578 return 0;
579 drop:
580 kfree_skb(skb);
581 return 0;
582}
583
41135cc8 584static const struct inet6_protocol pim6_protocol = {
14fb64e1
YH
585 .handler = pim6_rcv,
586};
587
588/* Service routines creating virtual interfaces: PIMREG */
589
6fef4c0c
SH
590static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
591 struct net_device *dev)
14fb64e1 592{
8229efda 593 struct net *net = dev_net(dev);
b70432f7 594 struct mr_table *mrt;
4c9483b2
DM
595 struct flowi6 fl6 = {
596 .flowi6_oif = dev->ifindex,
6a662719 597 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
4c9483b2 598 .flowi6_mark = skb->mark,
d1db275d 599 };
d1db275d 600
cb9f1b78
WB
601 if (!pskb_inet_may_pull(skb))
602 goto tx_err;
603
604 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
605 goto tx_err;
8229efda 606
14fb64e1 607 read_lock(&mrt_lock);
dc58c78c
PE
608 dev->stats.tx_bytes += skb->len;
609 dev->stats.tx_packets++;
6bd52143 610 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
14fb64e1
YH
611 read_unlock(&mrt_lock);
612 kfree_skb(skb);
6ed10654 613 return NETDEV_TX_OK;
cb9f1b78
WB
614
615tx_err:
616 dev->stats.tx_errors++;
617 kfree_skb(skb);
618 return NETDEV_TX_OK;
14fb64e1
YH
619}
620
ee9b9596
ND
621static int reg_vif_get_iflink(const struct net_device *dev)
622{
623 return 0;
624}
625
007c3838
SH
626static const struct net_device_ops reg_vif_netdev_ops = {
627 .ndo_start_xmit = reg_vif_xmit,
ee9b9596 628 .ndo_get_iflink = reg_vif_get_iflink,
007c3838
SH
629};
630
14fb64e1
YH
631static void reg_vif_setup(struct net_device *dev)
632{
633 dev->type = ARPHRD_PIMREG;
634 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
635 dev->flags = IFF_NOARP;
007c3838 636 dev->netdev_ops = &reg_vif_netdev_ops;
cf124db5 637 dev->needs_free_netdev = true;
403dbb97 638 dev->features |= NETIF_F_NETNS_LOCAL;
639}
640
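/* Create and register the per-table "pim6reg"/"pim6regN" device; packets
 * routed to it are handed to the daemon as MRT6MSG_WHOLEPKT notifications
 * (see reg_vif_xmit() above).
 */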
b70432f7 641static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
14fb64e1
YH
642{
643 struct net_device *dev;
d1db275d
PM
644 char name[IFNAMSIZ];
645
646 if (mrt->id == RT6_TABLE_DFLT)
647 sprintf(name, "pim6reg");
648 else
649 sprintf(name, "pim6reg%u", mrt->id);
14fb64e1 650
c835a677 651 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
63159f29 652 if (!dev)
14fb64e1
YH
653 return NULL;
654
8229efda
BT
655 dev_net_set(dev, net);
656
14fb64e1
YH
657 if (register_netdevice(dev)) {
658 free_netdev(dev);
659 return NULL;
660 }
14fb64e1 661
00f54e68 662 if (dev_open(dev, NULL))
14fb64e1
YH
663 goto failure;
664
7af3db78 665 dev_hold(dev);
14fb64e1
YH
666 return dev;
667
668failure:
14fb64e1
YH
669 unregister_netdevice(dev);
670 return NULL;
671}
672#endif
673
088aa3ee
YM
674static int call_ip6mr_vif_entry_notifiers(struct net *net,
675 enum fib_event_type event_type,
676 struct vif_device *vif,
677 mifi_t vif_index, u32 tb_id)
678{
679 return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
680 vif, vif_index, tb_id,
681 &net->ipv6.ipmr_seq);
682}
7bc570c8 683
088aa3ee
YM
684static int call_ip6mr_mfc_entry_notifiers(struct net *net,
685 enum fib_event_type event_type,
686 struct mfc6_cache *mfc, u32 tb_id)
687{
688 return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
689 &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
690}
691
692/* Delete a VIF entry */
b70432f7 693static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
723b929c 694 struct list_head *head)
7bc570c8 695{
6853f21f 696 struct vif_device *v;
7bc570c8 697 struct net_device *dev;
1d6e55f1 698 struct inet6_dev *in6_dev;
6bd52143
PM
699
700 if (vifi < 0 || vifi >= mrt->maxvif)
7bc570c8
YH
701 return -EADDRNOTAVAIL;
702
b70432f7 703 v = &mrt->vif_table[vifi];
7bc570c8 704
088aa3ee
YM
705 if (VIF_EXISTS(mrt, vifi))
706 call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
707 FIB_EVENT_VIF_DEL, v, vifi,
708 mrt->id);
709
7bc570c8
YH
710 write_lock_bh(&mrt_lock);
711 dev = v->dev;
712 v->dev = NULL;
713
714 if (!dev) {
715 write_unlock_bh(&mrt_lock);
716 return -EADDRNOTAVAIL;
717 }
718
14fb64e1 719#ifdef CONFIG_IPV6_PIMSM_V2
6bd52143
PM
720 if (vifi == mrt->mroute_reg_vif_num)
721 mrt->mroute_reg_vif_num = -1;
14fb64e1
YH
722#endif
723
6bd52143 724 if (vifi + 1 == mrt->maxvif) {
7bc570c8
YH
725 int tmp;
726 for (tmp = vifi - 1; tmp >= 0; tmp--) {
b70432f7 727 if (VIF_EXISTS(mrt, tmp))
7bc570c8
YH
728 break;
729 }
6bd52143 730 mrt->maxvif = tmp + 1;
7bc570c8
YH
731 }
732
733 write_unlock_bh(&mrt_lock);
734
735 dev_set_allmulti(dev, -1);
736
1d6e55f1 737 in6_dev = __in6_dev_get(dev);
d67b8c61 738 if (in6_dev) {
1d6e55f1 739 in6_dev->cnf.mc_forwarding--;
85b3daad 740 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
d67b8c61
ND
741 NETCONFA_MC_FORWARDING,
742 dev->ifindex, &in6_dev->cnf);
743 }
1d6e55f1 744
723b929c 745 if ((v->flags & MIFF_REGISTER) && !notify)
c871e664 746 unregister_netdevice_queue(dev, head);
7bc570c8
YH
747
748 dev_put(dev);
749 return 0;
750}
751
87c418bf 752static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
58701ad4 753{
494fff56 754 struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
87c418bf 755
494fff56 756 kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
58701ad4
BT
757}
758
87c418bf
YM
759static inline void ip6mr_cache_free(struct mfc6_cache *c)
760{
494fff56 761 call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
87c418bf
YM
762}
763
7bc570c8
YH
764/* Destroy an unresolved cache entry, killing queued skbs
765 and reporting error to netlink readers.
766 */
767
b70432f7 768static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
7bc570c8 769{
6bd52143 770 struct net *net = read_pnet(&mrt->net);
7bc570c8
YH
771 struct sk_buff *skb;
772
6bd52143 773 atomic_dec(&mrt->cache_resolve_queue_len);
7bc570c8 774
494fff56 775 while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
7bc570c8 776 if (ipv6_hdr(skb)->version == 0) {
af72868b
JB
777 struct nlmsghdr *nlh = skb_pull(skb,
778 sizeof(struct ipv6hdr));
7bc570c8 779 nlh->nlmsg_type = NLMSG_ERROR;
573ce260 780 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
7bc570c8 781 skb_trim(skb, nlh->nlmsg_len);
573ce260 782 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
15e47304 783 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
7bc570c8
YH
784 } else
785 kfree_skb(skb);
786 }
787
58701ad4 788 ip6mr_cache_free(c);
7bc570c8
YH
789}
790
791
c476efbc 792/* Timer process for all the unresolved queue. */
7bc570c8 793
b70432f7 794static void ipmr_do_expire_process(struct mr_table *mrt)
7bc570c8
YH
795{
796 unsigned long now = jiffies;
797 unsigned long expires = 10 * HZ;
494fff56 798 struct mr_mfc *c, *next;
7bc570c8 799
b70432f7 800 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
7bc570c8
YH
801 if (time_after(c->mfc_un.unres.expires, now)) {
802 /* not yet... */
803 unsigned long interval = c->mfc_un.unres.expires - now;
804 if (interval < expires)
805 expires = interval;
7bc570c8
YH
806 continue;
807 }
808
f30a7784 809 list_del(&c->list);
494fff56
YM
810 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
811 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
7bc570c8
YH
812 }
813
b70432f7 814 if (!list_empty(&mrt->mfc_unres_queue))
6bd52143 815 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
7bc570c8
YH
816}
817
e99e88a9 818static void ipmr_expire_process(struct timer_list *t)
7bc570c8 819{
b70432f7 820 struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
c476efbc 821
7bc570c8 822 if (!spin_trylock(&mfc_unres_lock)) {
6bd52143 823 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
7bc570c8
YH
824 return;
825 }
826
b70432f7 827 if (!list_empty(&mrt->mfc_unres_queue))
6bd52143 828 ipmr_do_expire_process(mrt);
7bc570c8
YH
829
830 spin_unlock(&mfc_unres_lock);
831}
832
833/* Fill oifs list. It is called under write locked mrt_lock. */
834
b70432f7 835static void ip6mr_update_thresholds(struct mr_table *mrt,
494fff56 836 struct mr_mfc *cache,
b5aa30b1 837 unsigned char *ttls)
7bc570c8
YH
838{
839 int vifi;
840
6ac7eb08 841 cache->mfc_un.res.minvif = MAXMIFS;
7bc570c8 842 cache->mfc_un.res.maxvif = 0;
6ac7eb08 843 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
7bc570c8 844
6bd52143 845 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
b70432f7 846 if (VIF_EXISTS(mrt, vifi) &&
4e16880c 847 ttls[vifi] && ttls[vifi] < 255) {
7bc570c8
YH
848 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
849 if (cache->mfc_un.res.minvif > vifi)
850 cache->mfc_un.res.minvif = vifi;
851 if (cache->mfc_un.res.maxvif <= vifi)
852 cache->mfc_un.res.maxvif = vifi + 1;
853 }
854 }
90b5ca17 855 cache->mfc_un.res.lastuse = jiffies;
856}
857
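/* Add a multicast interface (MIF): MIFF_REGISTER creates the pim6reg device,
 * otherwise the MIF is bound to an existing device by ifindex; allmulti is
 * enabled and mc_forwarding is bumped on the underlying device.
 */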
b70432f7 858static int mif6_add(struct net *net, struct mr_table *mrt,
6bd52143 859 struct mif6ctl *vifc, int mrtsock)
7bc570c8
YH
860{
861 int vifi = vifc->mif6c_mifi;
b70432f7 862 struct vif_device *v = &mrt->vif_table[vifi];
7bc570c8 863 struct net_device *dev;
1d6e55f1 864 struct inet6_dev *in6_dev;
5ae7b444 865 int err;
7bc570c8
YH
866
867 /* Is vif busy ? */
b70432f7 868 if (VIF_EXISTS(mrt, vifi))
7bc570c8
YH
869 return -EADDRINUSE;
870
871 switch (vifc->mif6c_flags) {
14fb64e1
YH
872#ifdef CONFIG_IPV6_PIMSM_V2
873 case MIFF_REGISTER:
874 /*
875 * Special Purpose VIF in PIM
876 * All the packets will be sent to the daemon
877 */
6bd52143 878 if (mrt->mroute_reg_vif_num >= 0)
14fb64e1 879 return -EADDRINUSE;
d1db275d 880 dev = ip6mr_reg_vif(net, mrt);
14fb64e1
YH
881 if (!dev)
882 return -ENOBUFS;
5ae7b444
WC
883 err = dev_set_allmulti(dev, 1);
884 if (err) {
885 unregister_netdevice(dev);
7af3db78 886 dev_put(dev);
5ae7b444
WC
887 return err;
888 }
14fb64e1
YH
889 break;
890#endif
7bc570c8 891 case 0:
8229efda 892 dev = dev_get_by_index(net, vifc->mif6c_pifi);
7bc570c8
YH
893 if (!dev)
894 return -EADDRNOTAVAIL;
5ae7b444 895 err = dev_set_allmulti(dev, 1);
7af3db78
WC
896 if (err) {
897 dev_put(dev);
5ae7b444 898 return err;
7af3db78 899 }
7bc570c8
YH
900 break;
901 default:
902 return -EINVAL;
903 }
904
1d6e55f1 905 in6_dev = __in6_dev_get(dev);
d67b8c61 906 if (in6_dev) {
1d6e55f1 907 in6_dev->cnf.mc_forwarding++;
85b3daad 908 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
d67b8c61
ND
909 NETCONFA_MC_FORWARDING,
910 dev->ifindex, &in6_dev->cnf);
911 }
1d6e55f1 912
6853f21f
YM
913 /* Fill in the VIF structures */
914 vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
915 vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
916 MIFF_REGISTER);
7bc570c8
YH
917
918 /* And finish update writing critical data */
919 write_lock_bh(&mrt_lock);
7bc570c8 920 v->dev = dev;
14fb64e1
YH
921#ifdef CONFIG_IPV6_PIMSM_V2
922 if (v->flags & MIFF_REGISTER)
6bd52143 923 mrt->mroute_reg_vif_num = vifi;
14fb64e1 924#endif
6bd52143
PM
925 if (vifi + 1 > mrt->maxvif)
926 mrt->maxvif = vifi + 1;
7bc570c8 927 write_unlock_bh(&mrt_lock);
088aa3ee
YM
928 call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
929 v, vifi, mrt->id);
7bc570c8
YH
930 return 0;
931}
932
b70432f7 933static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
b71d1d42
ED
934 const struct in6_addr *origin,
935 const struct in6_addr *mcastgrp)
7bc570c8 936{
87c418bf
YM
937 struct mfc6_cache_cmp_arg arg = {
938 .mf6c_origin = *origin,
939 .mf6c_mcastgrp = *mcastgrp,
940 };
87c418bf 941
845c9a7a 942 return mr_mfc_find(mrt, &arg);
660b26dc
ND
943}
944
945/* Look for a (*,G) entry */
b70432f7 946static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
660b26dc
ND
947 struct in6_addr *mcastgrp,
948 mifi_t mifi)
949{
87c418bf
YM
950 struct mfc6_cache_cmp_arg arg = {
951 .mf6c_origin = in6addr_any,
952 .mf6c_mcastgrp = *mcastgrp,
953 };
660b26dc
ND
954
955 if (ipv6_addr_any(mcastgrp))
845c9a7a
YM
956 return mr_mfc_find_any_parent(mrt, mifi);
957 return mr_mfc_find_any(mrt, mifi, &arg);
660b26dc
ND
958}
959
87c418bf
YM
960/* Look for a (S,G,iif) entry if parent != -1 */
961static struct mfc6_cache *
b70432f7 962ip6mr_cache_find_parent(struct mr_table *mrt,
87c418bf
YM
963 const struct in6_addr *origin,
964 const struct in6_addr *mcastgrp,
965 int parent)
966{
967 struct mfc6_cache_cmp_arg arg = {
968 .mf6c_origin = *origin,
969 .mf6c_mcastgrp = *mcastgrp,
970 };
87c418bf 971
845c9a7a 972 return mr_mfc_find_parent(mrt, &arg, parent);
87c418bf
YM
973}
974
845c9a7a 975/* Allocate a multicast cache entry */
b5aa30b1 976static struct mfc6_cache *ip6mr_cache_alloc(void)
7bc570c8 977{
36cbac59 978 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
63159f29 979 if (!c)
7bc570c8 980 return NULL;
494fff56
YM
981 c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
982 c->_c.mfc_un.res.minvif = MAXMIFS;
8c13af2a
YM
983 c->_c.free = ip6mr_cache_free_rcu;
984 refcount_set(&c->_c.mfc_un.res.refcount, 1);
7bc570c8
YH
985 return c;
986}
987
b5aa30b1 988static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
7bc570c8 989{
36cbac59 990 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
63159f29 991 if (!c)
7bc570c8 992 return NULL;
494fff56
YM
993 skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
994 c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
7bc570c8
YH
995 return c;
996}
997
998/*
999 * A cache entry has gone into a resolved state from queued
1000 */
1001
b70432f7 1002static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
6bd52143 1003 struct mfc6_cache *uc, struct mfc6_cache *c)
7bc570c8
YH
1004{
1005 struct sk_buff *skb;
1006
1007 /*
1008 * Play the pending entries through our router
1009 */
1010
494fff56 1011 while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
7bc570c8 1012 if (ipv6_hdr(skb)->version == 0) {
af72868b
JB
1013 struct nlmsghdr *nlh = skb_pull(skb,
1014 sizeof(struct ipv6hdr));
7bc570c8 1015
7b0db857
YM
1016 if (mr_fill_mroute(mrt, skb, &c->_c,
1017 nlmsg_data(nlh)) > 0) {
549e028d 1018 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
7bc570c8
YH
1019 } else {
1020 nlh->nlmsg_type = NLMSG_ERROR;
573ce260 1021 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
7bc570c8 1022 skb_trim(skb, nlh->nlmsg_len);
573ce260 1023 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
7bc570c8 1024 }
15e47304 1025 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
7bc570c8 1026 } else
e4a38c0c 1027 ip6_mr_forward(net, mrt, skb->dev, skb, c);
7bc570c8
YH
1028 }
1029}
1030
1031/*
dd12d15c 1032 * Bounce a cache query up to pim6sd and netlink.
7bc570c8
YH
1033 *
1034 * Called under mrt_lock.
1035 */
1036
b70432f7 1037static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
6bd52143 1038 mifi_t mifi, int assert)
7bc570c8 1039{
8571ab47 1040 struct sock *mroute6_sk;
7bc570c8
YH
1041 struct sk_buff *skb;
1042 struct mrt6msg *msg;
1043 int ret;
1044
14fb64e1
YH
1045#ifdef CONFIG_IPV6_PIMSM_V2
1046 if (assert == MRT6MSG_WHOLEPKT)
1047 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1048 +sizeof(*msg));
1049 else
1050#endif
1051 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
7bc570c8
YH
1052
1053 if (!skb)
1054 return -ENOBUFS;
1055
1056 /* I suppose that internal messages
1057 * do not require checksums */
1058
1059 skb->ip_summed = CHECKSUM_UNNECESSARY;
1060
14fb64e1
YH
1061#ifdef CONFIG_IPV6_PIMSM_V2
1062 if (assert == MRT6MSG_WHOLEPKT) {
1063 /* Ugly, but we have no choice with this interface.
1064 Duplicate old header, fix length etc.
1065 And all this only to mangle msg->im6_msgtype and
1066 to set msg->im6_mbz to "mbz" :-)
1067 */
1068 skb_push(skb, -skb_network_offset(pkt));
1069
1070 skb_push(skb, sizeof(*msg));
1071 skb_reset_transport_header(skb);
1072 msg = (struct mrt6msg *)skb_transport_header(skb);
1073 msg->im6_mbz = 0;
1074 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
6bd52143 1075 msg->im6_mif = mrt->mroute_reg_vif_num;
14fb64e1 1076 msg->im6_pad = 0;
4e3fd7a0
AD
1077 msg->im6_src = ipv6_hdr(pkt)->saddr;
1078 msg->im6_dst = ipv6_hdr(pkt)->daddr;
14fb64e1
YH
1079
1080 skb->ip_summed = CHECKSUM_UNNECESSARY;
1081 } else
1082#endif
1083 {
7bc570c8
YH
1084 /*
1085 * Copy the IP header
1086 */
1087
1088 skb_put(skb, sizeof(struct ipv6hdr));
1089 skb_reset_network_header(skb);
1090 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1091
1092 /*
1093 * Add our header
1094 */
1095 skb_put(skb, sizeof(*msg));
1096 skb_reset_transport_header(skb);
1097 msg = (struct mrt6msg *)skb_transport_header(skb);
1098
1099 msg->im6_mbz = 0;
1100 msg->im6_msgtype = assert;
6ac7eb08 1101 msg->im6_mif = mifi;
7bc570c8 1102 msg->im6_pad = 0;
4e3fd7a0
AD
1103 msg->im6_src = ipv6_hdr(pkt)->saddr;
1104 msg->im6_dst = ipv6_hdr(pkt)->daddr;
7bc570c8 1105
adf30907 1106 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
7bc570c8 1107 skb->ip_summed = CHECKSUM_UNNECESSARY;
14fb64e1 1108 }
7bc570c8 1109
8571ab47 1110 rcu_read_lock();
b70432f7 1111 mroute6_sk = rcu_dereference(mrt->mroute_sk);
8571ab47
YM
1112 if (!mroute6_sk) {
1113 rcu_read_unlock();
7bc570c8
YH
1114 kfree_skb(skb);
1115 return -EINVAL;
1116 }
1117
dd12d15c
JG
1118 mrt6msg_netlink_event(mrt, skb);
1119
8571ab47
YM
1120 /* Deliver to user space multicast routing algorithms */
1121 ret = sock_queue_rcv_skb(mroute6_sk, skb);
1122 rcu_read_unlock();
bd91b8bf 1123 if (ret < 0) {
e87cc472 1124 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
7bc570c8
YH
1125 kfree_skb(skb);
1126 }
1127
1128 return ret;
1129}
1130
494fff56
YM
1131/* Queue a packet for resolution. It gets locked cache entry! */
1132static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
e4a38c0c 1133 struct sk_buff *skb, struct net_device *dev)
7bc570c8 1134{
494fff56 1135 struct mfc6_cache *c;
f30a7784 1136 bool found = false;
7bc570c8 1137 int err;
7bc570c8
YH
1138
1139 spin_lock_bh(&mfc_unres_lock);
494fff56 1140 list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
c476efbc 1141 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
f30a7784
PM
1142 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1143 found = true;
7bc570c8 1144 break;
f30a7784 1145 }
7bc570c8
YH
1146 }
1147
f30a7784 1148 if (!found) {
7bc570c8
YH
1149 /*
1150 * Create a new entry if allowable
1151 */
1152
0079ad8e
HL
1153 c = ip6mr_cache_alloc_unres();
1154 if (!c) {
7bc570c8
YH
1155 spin_unlock_bh(&mfc_unres_lock);
1156
1157 kfree_skb(skb);
1158 return -ENOBUFS;
1159 }
1160
494fff56
YM
1161 /* Fill in the new cache entry */
1162 c->_c.mfc_parent = -1;
7bc570c8
YH
1163 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1164 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1165
1166 /*
1167 * Reflect first query at pim6sd
1168 */
6bd52143 1169 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
8229efda 1170 if (err < 0) {
7bc570c8
YH
1171 /* If the report failed throw the cache entry
1172 out - Brad Parker
1173 */
1174 spin_unlock_bh(&mfc_unres_lock);
1175
58701ad4 1176 ip6mr_cache_free(c);
7bc570c8
YH
1177 kfree_skb(skb);
1178 return err;
1179 }
1180
6bd52143 1181 atomic_inc(&mrt->cache_resolve_queue_len);
494fff56 1182 list_add(&c->_c.list, &mrt->mfc_unres_queue);
812e44dd 1183 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
7bc570c8 1184
6bd52143 1185 ipmr_do_expire_process(mrt);
7bc570c8
YH
1186 }
1187
494fff56
YM
1188 /* See if we can append the packet */
1189 if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
7bc570c8
YH
1190 kfree_skb(skb);
1191 err = -ENOBUFS;
1192 } else {
e4a38c0c
PR
1193 if (dev) {
1194 skb->dev = dev;
1195 skb->skb_iif = dev->ifindex;
1196 }
494fff56 1197 skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
7bc570c8
YH
1198 err = 0;
1199 }
1200
1201 spin_unlock_bh(&mfc_unres_lock);
1202 return err;
1203}
1204
1205/*
1206 * MFC6 cache manipulation by user space
1207 */
1208
b70432f7 1209static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
660b26dc 1210 int parent)
7bc570c8 1211{
87c418bf 1212 struct mfc6_cache *c;
7bc570c8 1213
87c418bf
YM
1214 /* The entries are added/deleted only under RTNL */
1215 rcu_read_lock();
1216 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1217 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1218 rcu_read_unlock();
1219 if (!c)
1220 return -ENOENT;
494fff56
YM
1221 rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
1222 list_del_rcu(&c->_c.list);
7bc570c8 1223
088aa3ee
YM
1224 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1225 FIB_EVENT_ENTRY_DEL, c, mrt->id);
87c418bf 1226 mr6_netlink_event(mrt, c, RTM_DELROUTE);
8c13af2a 1227 mr_cache_put(&c->_c);
87c418bf 1228 return 0;
7bc570c8
YH
1229}
1230
1231static int ip6mr_device_event(struct notifier_block *this,
1232 unsigned long event, void *ptr)
1233{
351638e7 1234 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
8229efda 1235 struct net *net = dev_net(dev);
b70432f7 1236 struct mr_table *mrt;
6853f21f 1237 struct vif_device *v;
7bc570c8
YH
1238 int ct;
1239
7bc570c8
YH
1240 if (event != NETDEV_UNREGISTER)
1241 return NOTIFY_DONE;
1242
d1db275d 1243 ip6mr_for_each_table(mrt, net) {
b70432f7 1244 v = &mrt->vif_table[0];
d1db275d
PM
1245 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1246 if (v->dev == dev)
723b929c 1247 mif6_delete(mrt, ct, 1, NULL);
d1db275d 1248 }
7bc570c8 1249 }
c871e664 1250
7bc570c8
YH
1251 return NOTIFY_DONE;
1252}
1253
088aa3ee
YM
1254static unsigned int ip6mr_seq_read(struct net *net)
1255{
1256 ASSERT_RTNL();
1257
1258 return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
1259}
1260
b7a59557
JP
1261static int ip6mr_dump(struct net *net, struct notifier_block *nb,
1262 struct netlink_ext_ack *extack)
088aa3ee
YM
1263{
1264 return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
b7a59557 1265 ip6mr_mr_table_iter, &mrt_lock, extack);
088aa3ee
YM
1266}
1267
7bc570c8
YH
1268static struct notifier_block ip6_mr_notifier = {
1269 .notifier_call = ip6mr_device_event
1270};
1271
088aa3ee
YM
1272static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
1273 .family = RTNL_FAMILY_IP6MR,
1274 .fib_seq_read = ip6mr_seq_read,
1275 .fib_dump = ip6mr_dump,
1276 .owner = THIS_MODULE,
1277};
1278
1279static int __net_init ip6mr_notifier_init(struct net *net)
1280{
1281 struct fib_notifier_ops *ops;
1282
1283 net->ipv6.ipmr_seq = 0;
7bc570c8 1284
088aa3ee
YM
1285 ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
1286 if (IS_ERR(ops))
1287 return PTR_ERR(ops);
1288
1289 net->ipv6.ip6mr_notifier_ops = ops;
1290
1291 return 0;
1292}
1293
1294static void __net_exit ip6mr_notifier_exit(struct net *net)
1295{
1296 fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
1297 net->ipv6.ip6mr_notifier_ops = NULL;
1298}
1299
1300/* Setup for IP multicast routing */
4e16880c
BT
1301static int __net_init ip6mr_net_init(struct net *net)
1302{
d1db275d 1303 int err;
f30a7784 1304
088aa3ee
YM
1305 err = ip6mr_notifier_init(net);
1306 if (err)
1307 return err;
1308
d1db275d
PM
1309 err = ip6mr_rules_init(net);
1310 if (err < 0)
088aa3ee 1311 goto ip6mr_rules_fail;
8b90fc7e
BT
1312
1313#ifdef CONFIG_PROC_FS
1314 err = -ENOMEM;
c3506372
CH
1315 if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
1316 sizeof(struct mr_vif_iter)))
8b90fc7e 1317 goto proc_vif_fail;
c3506372
CH
1318 if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
1319 sizeof(struct mr_mfc_iter)))
8b90fc7e
BT
1320 goto proc_cache_fail;
1321#endif
6bd52143 1322
4a6258a0
BT
1323 return 0;
1324
8b90fc7e
BT
1325#ifdef CONFIG_PROC_FS
1326proc_cache_fail:
ece31ffd 1327 remove_proc_entry("ip6_mr_vif", net->proc_net);
8b90fc7e 1328proc_vif_fail:
d1db275d 1329 ip6mr_rules_exit(net);
8b90fc7e 1330#endif
088aa3ee
YM
1331ip6mr_rules_fail:
1332 ip6mr_notifier_exit(net);
4e16880c
BT
1333 return err;
1334}
1335
1336static void __net_exit ip6mr_net_exit(struct net *net)
1337{
8b90fc7e 1338#ifdef CONFIG_PROC_FS
ece31ffd
G
1339 remove_proc_entry("ip6_mr_cache", net->proc_net);
1340 remove_proc_entry("ip6_mr_vif", net->proc_net);
8b90fc7e 1341#endif
d1db275d 1342 ip6mr_rules_exit(net);
088aa3ee 1343 ip6mr_notifier_exit(net);
4e16880c
BT
1344}
1345
1346static struct pernet_operations ip6mr_net_ops = {
1347 .init = ip6mr_net_init,
1348 .exit = ip6mr_net_exit,
1349};
1350
623d1a1a 1351int __init ip6_mr_init(void)
7bc570c8 1352{
623d1a1a
WC
1353 int err;
1354
7bc570c8
YH
1355 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1356 sizeof(struct mfc6_cache),
1357 0, SLAB_HWCACHE_ALIGN,
1358 NULL);
1359 if (!mrt_cachep)
623d1a1a 1360 return -ENOMEM;
7bc570c8 1361
4e16880c
BT
1362 err = register_pernet_subsys(&ip6mr_net_ops);
1363 if (err)
1364 goto reg_pernet_fail;
1365
623d1a1a
WC
1366 err = register_netdevice_notifier(&ip6_mr_notifier);
1367 if (err)
1368 goto reg_notif_fail;
403dbb97
TG
1369#ifdef CONFIG_IPV6_PIMSM_V2
1370 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
f3213831 1371 pr_err("%s: can't add PIM protocol\n", __func__);
403dbb97
TG
1372 err = -EAGAIN;
1373 goto add_proto_fail;
1374 }
1375#endif
a3fde2ad
FW
1376 err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
1377 NULL, ip6mr_rtm_dumproute, 0);
1378 if (err == 0)
1379 return 0;
1380
403dbb97 1381#ifdef CONFIG_IPV6_PIMSM_V2
a3fde2ad 1382 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
403dbb97
TG
1383add_proto_fail:
1384 unregister_netdevice_notifier(&ip6_mr_notifier);
1385#endif
87b30a65 1386reg_notif_fail:
4e16880c
BT
1387 unregister_pernet_subsys(&ip6mr_net_ops);
1388reg_pernet_fail:
87b30a65 1389 kmem_cache_destroy(mrt_cachep);
623d1a1a 1390 return err;
7bc570c8
YH
1391}
1392
623d1a1a
WC
1393void ip6_mr_cleanup(void)
1394{
ffb1388a
DJ
1395 rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1396#ifdef CONFIG_IPV6_PIMSM_V2
1397 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1398#endif
623d1a1a 1399 unregister_netdevice_notifier(&ip6_mr_notifier);
4e16880c 1400 unregister_pernet_subsys(&ip6mr_net_ops);
623d1a1a
WC
1401 kmem_cache_destroy(mrt_cachep);
1402}
7bc570c8 1403
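/* Install or update an (S,G) cache entry on behalf of user space.  If an
 * unresolved entry for the same flow was queued, its pending skbs are
 * released through ip6mr_cache_resolve() once the new entry is in place.
 */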
b70432f7 1404static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
660b26dc 1405 struct mf6cctl *mfc, int mrtsock, int parent)
7bc570c8 1406{
6ac7eb08 1407 unsigned char ttls[MAXMIFS];
87c418bf 1408 struct mfc6_cache *uc, *c;
494fff56 1409 struct mr_mfc *_uc;
87c418bf
YM
1410 bool found;
1411 int i, err;
7bc570c8 1412
a50436f2
PM
1413 if (mfc->mf6cc_parent >= MAXMIFS)
1414 return -ENFILE;
1415
6ac7eb08
RR
1416 memset(ttls, 255, MAXMIFS);
1417 for (i = 0; i < MAXMIFS; i++) {
7bc570c8
YH
1418 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1419 ttls[i] = 1;
7bc570c8
YH
1420 }
1421
87c418bf
YM
1422 /* The entries are added/deleted only under RTNL */
1423 rcu_read_lock();
1424 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1425 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1426 rcu_read_unlock();
1427 if (c) {
7bc570c8 1428 write_lock_bh(&mrt_lock);
494fff56
YM
1429 c->_c.mfc_parent = mfc->mf6cc_parent;
1430 ip6mr_update_thresholds(mrt, &c->_c, ttls);
7bc570c8 1431 if (!mrtsock)
494fff56 1432 c->_c.mfc_flags |= MFC_STATIC;
7bc570c8 1433 write_unlock_bh(&mrt_lock);
088aa3ee
YM
1434 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
1435 c, mrt->id);
812e44dd 1436 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
7bc570c8
YH
1437 return 0;
1438 }
1439
660b26dc
ND
1440 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1441 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
7bc570c8
YH
1442 return -EINVAL;
1443
b5aa30b1 1444 c = ip6mr_cache_alloc();
63159f29 1445 if (!c)
7bc570c8
YH
1446 return -ENOMEM;
1447
1448 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1449 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
494fff56
YM
1450 c->_c.mfc_parent = mfc->mf6cc_parent;
1451 ip6mr_update_thresholds(mrt, &c->_c, ttls);
7bc570c8 1452 if (!mrtsock)
494fff56 1453 c->_c.mfc_flags |= MFC_STATIC;
7bc570c8 1454
494fff56 1455 err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
87c418bf
YM
1456 ip6mr_rht_params);
1457 if (err) {
1458 pr_err("ip6mr: rhtable insert error %d\n", err);
1459 ip6mr_cache_free(c);
1460 return err;
1461 }
494fff56 1462 list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
7bc570c8 1463
87c418bf
YM
1464 /* Check to see if we resolved a queued list. If so we
1465 * need to send on the frames and tidy up.
7bc570c8 1466 */
f30a7784 1467 found = false;
7bc570c8 1468 spin_lock_bh(&mfc_unres_lock);
494fff56
YM
1469 list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1470 uc = (struct mfc6_cache *)_uc;
c476efbc 1471 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
7bc570c8 1472 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
494fff56 1473 list_del(&_uc->list);
6bd52143 1474 atomic_dec(&mrt->cache_resolve_queue_len);
f30a7784 1475 found = true;
7bc570c8
YH
1476 break;
1477 }
1478 }
b70432f7 1479 if (list_empty(&mrt->mfc_unres_queue))
6bd52143 1480 del_timer(&mrt->ipmr_expire_timer);
7bc570c8
YH
1481 spin_unlock_bh(&mfc_unres_lock);
1482
f30a7784 1483 if (found) {
6bd52143 1484 ip6mr_cache_resolve(net, mrt, uc, c);
58701ad4 1485 ip6mr_cache_free(uc);
7bc570c8 1486 }
088aa3ee
YM
1487 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
1488 c, mrt->id);
812e44dd 1489 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
7bc570c8
YH
1490 return 0;
1491}
1492
1493/*
1494 * Close the multicast socket, and clear the vif tables etc
1495 */
1496
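/* The MRT6_FLUSH_* bits in @flags select what gets dropped: MIFs and/or MFC
 * entries, optionally including static ones.
 */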
ca8d4794 1497static void mroute_clean_tables(struct mr_table *mrt, int flags)
7bc570c8 1498{
494fff56 1499 struct mr_mfc *c, *tmp;
c871e664 1500 LIST_HEAD(list);
87c418bf 1501 int i;
7bc570c8 1502
87c418bf 1503 /* Shut down all active vif entries */
ca8d4794
CS
1504 if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
1505 for (i = 0; i < mrt->maxvif; i++) {
1506 if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
1507 !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
1508 (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
1509 continue;
1510 mif6_delete(mrt, i, 0, &list);
1511 }
1512 unregister_netdevice_many(&list);
7bc570c8
YH
1513 }
1514
87c418bf 1515 /* Wipe the cache */
ca8d4794
CS
1516 if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
1517 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1518 if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
1519 (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
1520 continue;
1521 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
1522 list_del_rcu(&c->list);
1523 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1524 FIB_EVENT_ENTRY_DEL,
1525 (struct mfc6_cache *)c, mrt->id);
1526 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
1527 mr_cache_put(c);
1528 }
7bc570c8
YH
1529 }
1530
ca8d4794
CS
1531 if (flags & MRT6_FLUSH_MFC) {
1532 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1533 spin_lock_bh(&mfc_unres_lock);
1534 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1535 list_del(&c->list);
1536 mr6_netlink_event(mrt, (struct mfc6_cache *)c,
1537 RTM_DELROUTE);
1538 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
1539 }
1540 spin_unlock_bh(&mfc_unres_lock);
7bc570c8 1541 }
7bc570c8
YH
1542 }
1543}
1544
b70432f7 1545static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
7bc570c8
YH
1546{
1547 int err = 0;
8229efda 1548 struct net *net = sock_net(sk);
7bc570c8
YH
1549
1550 rtnl_lock();
1551 write_lock_bh(&mrt_lock);
b70432f7 1552 if (rtnl_dereference(mrt->mroute_sk)) {
7bc570c8 1553 err = -EADDRINUSE;
8571ab47 1554 } else {
b70432f7 1555 rcu_assign_pointer(mrt->mroute_sk, sk);
a366e300 1556 sock_set_flag(sk, SOCK_RCU_FREE);
8571ab47 1557 net->ipv6.devconf_all->mc_forwarding++;
927265bc 1558 }
7bc570c8
YH
1559 write_unlock_bh(&mrt_lock);
1560
927265bc 1561 if (!err)
85b3daad
DA
1562 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1563 NETCONFA_MC_FORWARDING,
927265bc
ED
1564 NETCONFA_IFINDEX_ALL,
1565 net->ipv6.devconf_all);
7bc570c8
YH
1566 rtnl_unlock();
1567
1568 return err;
1569}
1570
1571int ip6mr_sk_done(struct sock *sk)
1572{
d1db275d 1573 int err = -EACCES;
8229efda 1574 struct net *net = sock_net(sk);
b70432f7 1575 struct mr_table *mrt;
7bc570c8 1576
338d182f
FR
1577 if (sk->sk_type != SOCK_RAW ||
1578 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1579 return err;
1580
7bc570c8 1581 rtnl_lock();
d1db275d 1582 ip6mr_for_each_table(mrt, net) {
b70432f7 1583 if (sk == rtnl_dereference(mrt->mroute_sk)) {
d1db275d 1584 write_lock_bh(&mrt_lock);
b70432f7 1585 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
a366e300
ED
1586 /* Note that mroute_sk had SOCK_RCU_FREE set,
1587 * so the RCU grace period before sk freeing
1588 * is guaranteed by sk_destruct()
1589 */
d1db275d 1590 net->ipv6.devconf_all->mc_forwarding--;
927265bc 1591 write_unlock_bh(&mrt_lock);
85b3daad 1592 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
d67b8c61
ND
1593 NETCONFA_MC_FORWARDING,
1594 NETCONFA_IFINDEX_ALL,
1595 net->ipv6.devconf_all);
7bc570c8 1596
ca8d4794 1597 mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
d1db275d
PM
1598 err = 0;
1599 break;
1600 }
1601 }
7bc570c8
YH
1602 rtnl_unlock();
1603
1604 return err;
1605}
1606
8571ab47 1607bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
6bd52143 1608{
b70432f7 1609 struct mr_table *mrt;
4c9483b2 1610 struct flowi6 fl6 = {
e374c618 1611 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
4c9483b2
DM
1612 .flowi6_oif = skb->dev->ifindex,
1613 .flowi6_mark = skb->mark,
d1db275d
PM
1614 };
1615
4c9483b2 1616 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
d1db275d 1617 return NULL;
6bd52143 1618
b70432f7 1619 return rcu_access_pointer(mrt->mroute_sk);
6bd52143 1620}
8571ab47 1621EXPORT_SYMBOL(mroute6_is_socket);
6bd52143 1622
1623/*
1624 * Socket options and virtual interface manipulation. The whole
1625 * virtual interface system is a complete heap, but unfortunately
1626 * that's how BSD mrouted happens to think. Maybe one day with a proper
1627 * MOSPF/PIM router set up we can clean this up.
1628 */
1629
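/* Typical user-space flow (e.g. pim6sd), sketched only; error handling and
 * the interface name are illustrative:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	struct mif6ctl mc = { .mif6c_mifi = 0,
 *			      .mif6c_pifi = if_nametoindex("eth0") };
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *	... MRT6_ADD_MFC as (S,G) entries are learned ...
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */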
b7058842 1630int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
7bc570c8 1631{
660b26dc 1632 int ret, parent = 0;
7bc570c8
YH
1633 struct mif6ctl vif;
1634 struct mf6cctl mfc;
1635 mifi_t mifi;
8229efda 1636 struct net *net = sock_net(sk);
b70432f7 1637 struct mr_table *mrt;
d1db275d 1638
99253eb7
XL
1639 if (sk->sk_type != SOCK_RAW ||
1640 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1641 return -EOPNOTSUPP;
1642
d1db275d 1643 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
63159f29 1644 if (!mrt)
d1db275d 1645 return -ENOENT;
7bc570c8
YH
1646
1647 if (optname != MRT6_INIT) {
b70432f7 1648 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
8571ab47 1649 !ns_capable(net->user_ns, CAP_NET_ADMIN))
7bc570c8
YH
1650 return -EACCES;
1651 }
1652
1653 switch (optname) {
1654 case MRT6_INIT:
7bc570c8
YH
1655 if (optlen < sizeof(int))
1656 return -EINVAL;
1657
6bd52143 1658 return ip6mr_sk_init(mrt, sk);
7bc570c8
YH
1659
1660 case MRT6_DONE:
1661 return ip6mr_sk_done(sk);
1662
1663 case MRT6_ADD_MIF:
1664 if (optlen < sizeof(vif))
1665 return -EINVAL;
1666 if (copy_from_user(&vif, optval, sizeof(vif)))
1667 return -EFAULT;
6ac7eb08 1668 if (vif.mif6c_mifi >= MAXMIFS)
7bc570c8
YH
1669 return -ENFILE;
1670 rtnl_lock();
8571ab47 1671 ret = mif6_add(net, mrt, &vif,
b70432f7 1672 sk == rtnl_dereference(mrt->mroute_sk));
7bc570c8
YH
1673 rtnl_unlock();
1674 return ret;
1675
1676 case MRT6_DEL_MIF:
1677 if (optlen < sizeof(mifi_t))
1678 return -EINVAL;
1679 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1680 return -EFAULT;
1681 rtnl_lock();
723b929c 1682 ret = mif6_delete(mrt, mifi, 0, NULL);
7bc570c8
YH
1683 rtnl_unlock();
1684 return ret;
1685
1686 /*
1687 * Manipulate the forwarding caches. These live
1688 * in a sort of kernel/user symbiosis.
1689 */
1690 case MRT6_ADD_MFC:
1691 case MRT6_DEL_MFC:
660b26dc 1692 parent = -1;
275757e6 1693 /* fall through */
660b26dc
ND
1694 case MRT6_ADD_MFC_PROXY:
1695 case MRT6_DEL_MFC_PROXY:
7bc570c8
YH
1696 if (optlen < sizeof(mfc))
1697 return -EINVAL;
1698 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1699 return -EFAULT;
660b26dc
ND
1700 if (parent == 0)
1701 parent = mfc.mf6cc_parent;
7bc570c8 1702 rtnl_lock();
660b26dc
ND
1703 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1704 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
7bc570c8 1705 else
660b26dc 1706 ret = ip6mr_mfc_add(net, mrt, &mfc,
8571ab47 1707 sk ==
b70432f7 1708 rtnl_dereference(mrt->mroute_sk),
8571ab47 1709 parent);
7bc570c8
YH
1710 rtnl_unlock();
1711 return ret;
1712
ca8d4794
CS
1713 case MRT6_FLUSH:
1714 {
1715 int flags;
1716
1717 if (optlen != sizeof(flags))
1718 return -EINVAL;
1719 if (get_user(flags, (int __user *)optval))
1720 return -EFAULT;
1721 rtnl_lock();
1722 mroute_clean_tables(mrt, flags);
1723 rtnl_unlock();
1724 return 0;
1725 }
1726
14fb64e1
YH
1727 /*
1728 * Control PIM assert (to activate pim will activate assert)
1729 */
1730 case MRT6_ASSERT:
1731 {
1732 int v;
03f52a0a
JP
1733
1734 if (optlen != sizeof(v))
1735 return -EINVAL;
14fb64e1
YH
1736 if (get_user(v, (int __user *)optval))
1737 return -EFAULT;
53d6841d 1738 mrt->mroute_do_assert = v;
14fb64e1
YH
1739 return 0;
1740 }
1741
1742#ifdef CONFIG_IPV6_PIMSM_V2
1743 case MRT6_PIM:
1744 {
a9f83bf3 1745 int v;
03f52a0a
JP
1746
1747 if (optlen != sizeof(v))
1748 return -EINVAL;
14fb64e1
YH
1749 if (get_user(v, (int __user *)optval))
1750 return -EFAULT;
1751 v = !!v;
1752 rtnl_lock();
1753 ret = 0;
6bd52143
PM
1754 if (v != mrt->mroute_do_pim) {
1755 mrt->mroute_do_pim = v;
1756 mrt->mroute_do_assert = v;
14fb64e1
YH
1757 }
1758 rtnl_unlock();
1759 return ret;
1760 }
1761
d1db275d
PM
1762#endif
1763#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1764 case MRT6_TABLE:
1765 {
1766 u32 v;
1767
1768 if (optlen != sizeof(u32))
1769 return -EINVAL;
1770 if (get_user(v, (u32 __user *)optval))
1771 return -EFAULT;
75356a81
DC
1772 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1773 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1774 return -EINVAL;
b70432f7 1775 if (sk == rcu_access_pointer(mrt->mroute_sk))
d1db275d
PM
1776 return -EBUSY;
1777
1778 rtnl_lock();
1779 ret = 0;
e783bb00
SD
1780 mrt = ip6mr_new_table(net, v);
1781 if (IS_ERR(mrt))
1782 ret = PTR_ERR(mrt);
848235ed
SD
1783 else
1784 raw6_sk(sk)->ip6mr_table = v;
d1db275d
PM
1785 rtnl_unlock();
1786 return ret;
1787 }
14fb64e1 1788#endif
7bc570c8 1789 /*
7d120c55 1790 * Spurious command, or MRT6_VERSION which you cannot
7bc570c8
YH
1791 * set.
1792 */
1793 default:
1794 return -ENOPROTOOPT;
1795 }
1796}
1797
1798/*
1799 * Getsock opt support for the multicast routing system.
1800 */
1801
1802int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1803 int __user *optlen)
1804{
1805 int olr;
1806 int val;
8229efda 1807 struct net *net = sock_net(sk);
b70432f7 1808 struct mr_table *mrt;
d1db275d 1809
99253eb7
XL
1810 if (sk->sk_type != SOCK_RAW ||
1811 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1812 return -EOPNOTSUPP;
1813
d1db275d 1814 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
63159f29 1815 if (!mrt)
d1db275d 1816 return -ENOENT;
7bc570c8
YH
1817
1818 switch (optname) {
1819 case MRT6_VERSION:
1820 val = 0x0305;
1821 break;
14fb64e1
YH
1822#ifdef CONFIG_IPV6_PIMSM_V2
1823 case MRT6_PIM:
6bd52143 1824 val = mrt->mroute_do_pim;
14fb64e1
YH
1825 break;
1826#endif
1827 case MRT6_ASSERT:
6bd52143 1828 val = mrt->mroute_do_assert;
14fb64e1 1829 break;
7bc570c8
YH
1830 default:
1831 return -ENOPROTOOPT;
1832 }
1833
1834 if (get_user(olr, optlen))
1835 return -EFAULT;
1836
1837 olr = min_t(int, olr, sizeof(int));
1838 if (olr < 0)
1839 return -EINVAL;
1840
1841 if (put_user(olr, optlen))
1842 return -EFAULT;
1843 if (copy_to_user(optval, &val, olr))
1844 return -EFAULT;
1845 return 0;
1846}
1847
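Only MRT6_VERSION, MRT6_PIM and MRT6_ASSERT can be read back through this path. A minimal illustrative sketch (not kernel code, assumes <stdio.h>) of querying the version on the control socket from the previous sketch, where fd is that hypothetical socket:

	int ver;
	socklen_t len = sizeof(ver);

	/* The handler above reports 0x0305 for MRT6_VERSION. */
	if (getsockopt(fd, IPPROTO_IPV6, MRT6_VERSION, &ver, &len) == 0)
		printf("ip6mr API version 0x%04x\n", ver);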
1848/*
 1849 * The IPv6 multicast ioctl support routines.
1850 */
1851
1852int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1853{
1854 struct sioc_sg_req6 sr;
1855 struct sioc_mif_req6 vr;
6853f21f 1856 struct vif_device *vif;
7bc570c8 1857 struct mfc6_cache *c;
8229efda 1858 struct net *net = sock_net(sk);
b70432f7 1859 struct mr_table *mrt;
d1db275d
PM
1860
1861 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
63159f29 1862 if (!mrt)
d1db275d 1863 return -ENOENT;
7bc570c8
YH
1864
1865 switch (cmd) {
1866 case SIOCGETMIFCNT_IN6:
1867 if (copy_from_user(&vr, arg, sizeof(vr)))
1868 return -EFAULT;
6bd52143 1869 if (vr.mifi >= mrt->maxvif)
7bc570c8 1870 return -EINVAL;
69d2c867 1871 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
7bc570c8 1872 read_lock(&mrt_lock);
b70432f7
YM
1873 vif = &mrt->vif_table[vr.mifi];
1874 if (VIF_EXISTS(mrt, vr.mifi)) {
7bc570c8
YH
1875 vr.icount = vif->pkt_in;
1876 vr.ocount = vif->pkt_out;
1877 vr.ibytes = vif->bytes_in;
1878 vr.obytes = vif->bytes_out;
1879 read_unlock(&mrt_lock);
1880
1881 if (copy_to_user(arg, &vr, sizeof(vr)))
1882 return -EFAULT;
1883 return 0;
1884 }
1885 read_unlock(&mrt_lock);
1886 return -EADDRNOTAVAIL;
1887 case SIOCGETSGCNT_IN6:
1888 if (copy_from_user(&sr, arg, sizeof(sr)))
1889 return -EFAULT;
1890
87c418bf 1891 rcu_read_lock();
6bd52143 1892 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
7bc570c8 1893 if (c) {
494fff56
YM
1894 sr.pktcnt = c->_c.mfc_un.res.pkt;
1895 sr.bytecnt = c->_c.mfc_un.res.bytes;
1896 sr.wrong_if = c->_c.mfc_un.res.wrong_if;
87c418bf 1897 rcu_read_unlock();
7bc570c8
YH
1898
1899 if (copy_to_user(arg, &sr, sizeof(sr)))
1900 return -EFAULT;
1901 return 0;
1902 }
87c418bf 1903 rcu_read_unlock();
7bc570c8
YH
1904 return -EADDRNOTAVAIL;
1905 default:
1906 return -ENOIOCTLCMD;
1907 }
1908}
1909
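The two ioctls above expose the per-MIF and per-(S,G) counters that the forwarding path further down maintains (pkt_in/pkt_out, bytes_in/bytes_out, and the mfc_un.res counters). An illustrative userspace sketch, reusing the hypothetical fd from the earlier sketches and assuming <sys/ioctl.h> and <stdio.h>:

	struct sioc_mif_req6 vr = { .mifi = 0 };
	struct sioc_sg_req6 sr = { 0 };

	if (ioctl(fd, SIOCGETMIFCNT_IN6, &vr) == 0)
		printf("mif %u: %lu pkts in, %lu pkts out\n",
		       (unsigned int)vr.mifi, vr.icount, vr.ocount);

	/* sr.src and sr.grp would be filled with the (S,G) pair of interest
	 * before asking for its packet/byte/wrong-if counters. */
	if (ioctl(fd, SIOCGETSGCNT_IN6, &sr) == 0)
		printf("pkts %lu bytes %lu wrong-if %lu\n",
		       sr.pktcnt, sr.bytecnt, sr.wrong_if);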
e2d57766
DM
1910#ifdef CONFIG_COMPAT
1911struct compat_sioc_sg_req6 {
1912 struct sockaddr_in6 src;
1913 struct sockaddr_in6 grp;
1914 compat_ulong_t pktcnt;
1915 compat_ulong_t bytecnt;
1916 compat_ulong_t wrong_if;
1917};
1918
1919struct compat_sioc_mif_req6 {
1920 mifi_t mifi;
1921 compat_ulong_t icount;
1922 compat_ulong_t ocount;
1923 compat_ulong_t ibytes;
1924 compat_ulong_t obytes;
1925};
1926
1927int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1928{
1929 struct compat_sioc_sg_req6 sr;
1930 struct compat_sioc_mif_req6 vr;
6853f21f 1931 struct vif_device *vif;
e2d57766
DM
1932 struct mfc6_cache *c;
1933 struct net *net = sock_net(sk);
b70432f7 1934 struct mr_table *mrt;
e2d57766
DM
1935
1936 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
63159f29 1937 if (!mrt)
e2d57766
DM
1938 return -ENOENT;
1939
1940 switch (cmd) {
1941 case SIOCGETMIFCNT_IN6:
1942 if (copy_from_user(&vr, arg, sizeof(vr)))
1943 return -EFAULT;
1944 if (vr.mifi >= mrt->maxvif)
1945 return -EINVAL;
69d2c867 1946 vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
e2d57766 1947 read_lock(&mrt_lock);
b70432f7
YM
1948 vif = &mrt->vif_table[vr.mifi];
1949 if (VIF_EXISTS(mrt, vr.mifi)) {
e2d57766
DM
1950 vr.icount = vif->pkt_in;
1951 vr.ocount = vif->pkt_out;
1952 vr.ibytes = vif->bytes_in;
1953 vr.obytes = vif->bytes_out;
1954 read_unlock(&mrt_lock);
1955
1956 if (copy_to_user(arg, &vr, sizeof(vr)))
1957 return -EFAULT;
1958 return 0;
1959 }
1960 read_unlock(&mrt_lock);
1961 return -EADDRNOTAVAIL;
1962 case SIOCGETSGCNT_IN6:
1963 if (copy_from_user(&sr, arg, sizeof(sr)))
1964 return -EFAULT;
1965
87c418bf 1966 rcu_read_lock();
e2d57766
DM
1967 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1968 if (c) {
494fff56
YM
1969 sr.pktcnt = c->_c.mfc_un.res.pkt;
1970 sr.bytecnt = c->_c.mfc_un.res.bytes;
1971 sr.wrong_if = c->_c.mfc_un.res.wrong_if;
87c418bf 1972 rcu_read_unlock();
e2d57766
DM
1973
1974 if (copy_to_user(arg, &sr, sizeof(sr)))
1975 return -EFAULT;
1976 return 0;
1977 }
87c418bf 1978 rcu_read_unlock();
e2d57766
DM
1979 return -EADDRNOTAVAIL;
1980 default:
1981 return -ENOIOCTLCMD;
1982 }
1983}
1984#endif
7bc570c8 1985
0c4b51f0 1986static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
7bc570c8 1987{
87c11f1d
IS
1988 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
1989 IPSTATS_MIB_OUTFORWDATAGRAMS);
1990 IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
1991 IPSTATS_MIB_OUTOCTETS, skb->len);
13206b6b 1992 return dst_output(net, sk, skb);
7bc570c8
YH
1993}
1994
1995/*
1996 * Processing handlers for ip6mr_forward
1997 */
1998
b70432f7 1999static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
f5c6dfde 2000 struct sk_buff *skb, int vifi)
7bc570c8
YH
2001{
2002 struct ipv6hdr *ipv6h;
b70432f7 2003 struct vif_device *vif = &mrt->vif_table[vifi];
7bc570c8
YH
2004 struct net_device *dev;
2005 struct dst_entry *dst;
4c9483b2 2006 struct flowi6 fl6;
7bc570c8 2007
63159f29 2008 if (!vif->dev)
7bc570c8
YH
2009 goto out_free;
2010
14fb64e1
YH
2011#ifdef CONFIG_IPV6_PIMSM_V2
2012 if (vif->flags & MIFF_REGISTER) {
2013 vif->pkt_out++;
2014 vif->bytes_out += skb->len;
dc58c78c
PE
2015 vif->dev->stats.tx_bytes += skb->len;
2016 vif->dev->stats.tx_packets++;
6bd52143 2017 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
8da73b73 2018 goto out_free;
14fb64e1
YH
2019 }
2020#endif
2021
7bc570c8
YH
2022 ipv6h = ipv6_hdr(skb);
2023
4c9483b2
DM
2024 fl6 = (struct flowi6) {
2025 .flowi6_oif = vif->link,
2026 .daddr = ipv6h->daddr,
7bc570c8
YH
2027 };
2028
4c9483b2 2029 dst = ip6_route_output(net, NULL, &fl6);
5095d64d
RL
2030 if (dst->error) {
2031 dst_release(dst);
7bc570c8 2032 goto out_free;
5095d64d 2033 }
7bc570c8 2034
adf30907
ED
2035 skb_dst_drop(skb);
2036 skb_dst_set(skb, dst);
7bc570c8
YH
2037
2038 /*
 2039	 * RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
 2040	 * not only before forwarding, but also after forwarding on all output
 2041	 * interfaces. Clearly, if the mrouter runs a multicast
 2042	 * program, that program should receive packets regardless of which
 2043	 * interface it joined on.
 2044	 * If we did not do this, the program would have to join on all
 2045	 * interfaces. On the other hand, a multihomed host (or router, but
 2046	 * not mrouter) cannot join on more than one interface - that would
 2047	 * result in it receiving multiple copies of each packet.
2048 */
2049 dev = vif->dev;
2050 skb->dev = dev;
2051 vif->pkt_out++;
2052 vif->bytes_out += skb->len;
2053
2054 /* We are about to write */
2055 /* XXX: extension headers? */
2056 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2057 goto out_free;
2058
2059 ipv6h = ipv6_hdr(skb);
2060 ipv6h->hop_limit--;
2061
2062 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2063
29a26a56
EB
2064 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2065 net, NULL, skb, skb->dev, dev,
7bc570c8
YH
2066 ip6mr_forward2_finish);
2067
2068out_free:
2069 kfree_skb(skb);
2070 return 0;
2071}
2072
b70432f7 2073static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
7bc570c8
YH
2074{
2075 int ct;
6bd52143
PM
2076
2077 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
b70432f7 2078 if (mrt->vif_table[ct].dev == dev)
7bc570c8
YH
2079 break;
2080 }
2081 return ct;
2082}
2083
b70432f7 2084static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
e4a38c0c
PR
2085 struct net_device *dev, struct sk_buff *skb,
2086 struct mfc6_cache *c)
7bc570c8
YH
2087{
2088 int psend = -1;
2089 int vif, ct;
e4a38c0c 2090 int true_vifi = ip6mr_find_vif(mrt, dev);
7bc570c8 2091
494fff56
YM
2092 vif = c->_c.mfc_parent;
2093 c->_c.mfc_un.res.pkt++;
2094 c->_c.mfc_un.res.bytes += skb->len;
2095 c->_c.mfc_un.res.lastuse = jiffies;
7bc570c8 2096
494fff56 2097 if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
660b26dc
ND
2098 struct mfc6_cache *cache_proxy;
2099
40dc2ca3 2100 /* For an (*,G) entry, we only check that the incoming
660b26dc
ND
2101 * interface is part of the static tree.
2102 */
87c418bf 2103 rcu_read_lock();
845c9a7a 2104 cache_proxy = mr_mfc_find_any_parent(mrt, vif);
660b26dc 2105 if (cache_proxy &&
494fff56 2106 cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
87c418bf 2107 rcu_read_unlock();
660b26dc 2108 goto forward;
87c418bf
YM
2109 }
2110 rcu_read_unlock();
660b26dc
ND
2111 }
2112
14fb64e1
YH
2113 /*
2114 * Wrong interface: drop packet and (maybe) send PIM assert.
2115 */
e4a38c0c 2116 if (mrt->vif_table[vif].dev != dev) {
494fff56 2117 c->_c.mfc_un.res.wrong_if++;
14fb64e1 2118
6bd52143 2119 if (true_vifi >= 0 && mrt->mroute_do_assert &&
14fb64e1
YH
 2120		    /* PIM-SM uses asserts when switching from RPT to SPT,
 2121		       so we cannot check that the packet arrived on an oif.
 2122		       That is bad, but otherwise we would need to move a pretty
 2123		       large chunk of pimd into the kernel. Ough... --ANK
2124 */
6bd52143 2125 (mrt->mroute_do_pim ||
494fff56 2126 c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
14fb64e1 2127 time_after(jiffies,
494fff56
YM
2128 c->_c.mfc_un.res.last_assert +
2129 MFC_ASSERT_THRESH)) {
2130 c->_c.mfc_un.res.last_assert = jiffies;
6bd52143 2131 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
14fb64e1
YH
2132 }
2133 goto dont_forward;
2134 }
2135
660b26dc 2136forward:
b70432f7
YM
2137 mrt->vif_table[vif].pkt_in++;
2138 mrt->vif_table[vif].bytes_in += skb->len;
7bc570c8
YH
2139
2140 /*
2141 * Forward the frame
2142 */
494fff56
YM
2143 if (ipv6_addr_any(&c->mf6c_origin) &&
2144 ipv6_addr_any(&c->mf6c_mcastgrp)) {
660b26dc 2145 if (true_vifi >= 0 &&
494fff56 2146 true_vifi != c->_c.mfc_parent &&
660b26dc 2147 ipv6_hdr(skb)->hop_limit >
494fff56 2148 c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
660b26dc
ND
2149 /* It's an (*,*) entry and the packet is not coming from
2150 * the upstream: forward the packet to the upstream
2151 * only.
2152 */
494fff56 2153 psend = c->_c.mfc_parent;
660b26dc
ND
2154 goto last_forward;
2155 }
2156 goto dont_forward;
2157 }
494fff56
YM
2158 for (ct = c->_c.mfc_un.res.maxvif - 1;
2159 ct >= c->_c.mfc_un.res.minvif; ct--) {
660b26dc 2160 /* For (*,G) entry, don't forward to the incoming interface */
494fff56
YM
2161 if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
2162 ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
7bc570c8
YH
2163 if (psend != -1) {
2164 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2165 if (skb2)
f5c6dfde 2166 ip6mr_forward2(net, mrt, skb2, psend);
7bc570c8
YH
2167 }
2168 psend = ct;
2169 }
2170 }
660b26dc 2171last_forward:
7bc570c8 2172 if (psend != -1) {
f5c6dfde 2173 ip6mr_forward2(net, mrt, skb, psend);
2b52c3ad 2174 return;
7bc570c8
YH
2175 }
2176
14fb64e1 2177dont_forward:
7bc570c8 2178 kfree_skb(skb);
7bc570c8
YH
2179}
2180
2181
2182/*
2183 * Multicast packets for forwarding arrive here
2184 */
2185
2186int ip6_mr_input(struct sk_buff *skb)
2187{
2188 struct mfc6_cache *cache;
8229efda 2189 struct net *net = dev_net(skb->dev);
b70432f7 2190 struct mr_table *mrt;
4c9483b2
DM
2191 struct flowi6 fl6 = {
2192 .flowi6_iif = skb->dev->ifindex,
2193 .flowi6_mark = skb->mark,
d1db275d
PM
2194 };
2195 int err;
e4a38c0c
PR
2196 struct net_device *dev;
2197
2198 /* skb->dev passed in is the master dev for vrfs.
2199 * Get the proper interface that does have a vif associated with it.
2200 */
2201 dev = skb->dev;
2202 if (netif_is_l3_master(skb->dev)) {
2203 dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
2204 if (!dev) {
2205 kfree_skb(skb);
2206 return -ENODEV;
2207 }
2208 }
d1db275d 2209
4c9483b2 2210 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2015de5f
BG
2211 if (err < 0) {
2212 kfree_skb(skb);
d1db275d 2213 return err;
2015de5f 2214 }
7bc570c8
YH
2215
2216 read_lock(&mrt_lock);
6bd52143 2217 cache = ip6mr_cache_find(mrt,
8229efda 2218 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
63159f29 2219 if (!cache) {
e4a38c0c 2220 int vif = ip6mr_find_vif(mrt, dev);
660b26dc
ND
2221
2222 if (vif >= 0)
2223 cache = ip6mr_cache_find_any(mrt,
2224 &ipv6_hdr(skb)->daddr,
2225 vif);
2226 }
7bc570c8
YH
2227
2228 /*
2229 * No usable cache entry
2230 */
63159f29 2231 if (!cache) {
7bc570c8
YH
2232 int vif;
2233
e4a38c0c 2234 vif = ip6mr_find_vif(mrt, dev);
7bc570c8 2235 if (vif >= 0) {
e4a38c0c 2236 int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
7bc570c8
YH
2237 read_unlock(&mrt_lock);
2238
2239 return err;
2240 }
2241 read_unlock(&mrt_lock);
2242 kfree_skb(skb);
2243 return -ENODEV;
2244 }
2245
e4a38c0c 2246 ip6_mr_forward(net, mrt, dev, skb, cache);
7bc570c8
YH
2247
2248 read_unlock(&mrt_lock);
2249
2250 return 0;
2251}
2252
2cf75070 2253int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
fd61c6ba 2254 u32 portid)
7bc570c8
YH
2255{
2256 int err;
b70432f7 2257 struct mr_table *mrt;
7bc570c8 2258 struct mfc6_cache *cache;
adf30907 2259 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
7bc570c8 2260
d1db275d 2261 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
63159f29 2262 if (!mrt)
d1db275d
PM
2263 return -ENOENT;
2264
7bc570c8 2265 read_lock(&mrt_lock);
6bd52143 2266 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
660b26dc
ND
2267 if (!cache && skb->dev) {
2268 int vif = ip6mr_find_vif(mrt, skb->dev);
2269
2270 if (vif >= 0)
2271 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2272 vif);
2273 }
7bc570c8
YH
2274
2275 if (!cache) {
2276 struct sk_buff *skb2;
2277 struct ipv6hdr *iph;
2278 struct net_device *dev;
2279 int vif;
2280
7bc570c8 2281 dev = skb->dev;
63159f29 2282 if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
7bc570c8
YH
2283 read_unlock(&mrt_lock);
2284 return -ENODEV;
2285 }
2286
2287 /* really correct? */
2288 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2289 if (!skb2) {
2290 read_unlock(&mrt_lock);
2291 return -ENOMEM;
2292 }
2293
2cf75070 2294 NETLINK_CB(skb2).portid = portid;
7bc570c8
YH
2295 skb_reset_transport_header(skb2);
2296
2297 skb_put(skb2, sizeof(struct ipv6hdr));
2298 skb_reset_network_header(skb2);
2299
2300 iph = ipv6_hdr(skb2);
2301 iph->version = 0;
2302 iph->priority = 0;
2303 iph->flow_lbl[0] = 0;
2304 iph->flow_lbl[1] = 0;
2305 iph->flow_lbl[2] = 0;
2306 iph->payload_len = 0;
2307 iph->nexthdr = IPPROTO_NONE;
2308 iph->hop_limit = 0;
4e3fd7a0
AD
2309 iph->saddr = rt->rt6i_src.addr;
2310 iph->daddr = rt->rt6i_dst.addr;
7bc570c8 2311
e4a38c0c 2312 err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
7bc570c8
YH
2313 read_unlock(&mrt_lock);
2314
2315 return err;
2316 }
2317
7b0db857 2318 err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
7bc570c8
YH
2319 read_unlock(&mrt_lock);
2320 return err;
2321}
2322
b70432f7 2323static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
f518338b
ND
2324 u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2325 int flags)
5b285cac
PM
2326{
2327 struct nlmsghdr *nlh;
2328 struct rtmsg *rtm;
1eb99af5 2329 int err;
5b285cac 2330
f518338b 2331 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
63159f29 2332 if (!nlh)
5b285cac
PM
2333 return -EMSGSIZE;
2334
2335 rtm = nlmsg_data(nlh);
193c1e47 2336 rtm->rtm_family = RTNL_FAMILY_IP6MR;
5b285cac
PM
2337 rtm->rtm_dst_len = 128;
2338 rtm->rtm_src_len = 128;
2339 rtm->rtm_tos = 0;
2340 rtm->rtm_table = mrt->id;
c78679e8
DM
2341 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2342 goto nla_put_failure;
1eb99af5 2343 rtm->rtm_type = RTN_MULTICAST;
5b285cac 2344 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
494fff56 2345 if (c->_c.mfc_flags & MFC_STATIC)
9a68ac72
ND
2346 rtm->rtm_protocol = RTPROT_STATIC;
2347 else
2348 rtm->rtm_protocol = RTPROT_MROUTED;
5b285cac
PM
2349 rtm->rtm_flags = 0;
2350
930345ea
JB
2351 if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2352 nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
c78679e8 2353 goto nla_put_failure;
7b0db857 2354 err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
1eb99af5
ND
2355 /* do not break the dump if cache is unresolved */
2356 if (err < 0 && err != -ENOENT)
5b285cac
PM
2357 goto nla_put_failure;
2358
053c095a
JB
2359 nlmsg_end(skb, nlh);
2360 return 0;
5b285cac
PM
2361
2362nla_put_failure:
2363 nlmsg_cancel(skb, nlh);
2364 return -EMSGSIZE;
2365}
2366
7b0db857
YM
2367static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2368 u32 portid, u32 seq, struct mr_mfc *c,
2369 int cmd, int flags)
2370{
2371 return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
2372 cmd, flags);
2373}
2374
812e44dd
ND
2375static int mr6_msgsize(bool unresolved, int maxvif)
2376{
2377 size_t len =
2378 NLMSG_ALIGN(sizeof(struct rtmsg))
2379 + nla_total_size(4) /* RTA_TABLE */
2380 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
2381 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
2382 ;
2383
2384 if (!unresolved)
2385 len = len
2386 + nla_total_size(4) /* RTA_IIF */
2387 + nla_total_size(0) /* RTA_MULTIPATH */
2388 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2389 /* RTA_MFC_STATS */
3d6b66c1 2390 + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
812e44dd
ND
2391 ;
2392
2393 return len;
2394}
2395
b70432f7 2396static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
812e44dd
ND
2397 int cmd)
2398{
2399 struct net *net = read_pnet(&mrt->net);
2400 struct sk_buff *skb;
2401 int err = -ENOBUFS;
2402
494fff56 2403 skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
812e44dd 2404 GFP_ATOMIC);
63159f29 2405 if (!skb)
812e44dd
ND
2406 goto errout;
2407
f518338b 2408 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
812e44dd
ND
2409 if (err < 0)
2410 goto errout;
2411
2412 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2413 return;
2414
2415errout:
2416 kfree_skb(skb);
2417 if (err < 0)
2418 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2419}
2420
dd12d15c
JG
2421static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2422{
2423 size_t len =
2424 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2425 + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
2426 + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
2427 /* IP6MRA_CREPORT_SRC_ADDR */
2428 + nla_total_size(sizeof(struct in6_addr))
2429 /* IP6MRA_CREPORT_DST_ADDR */
2430 + nla_total_size(sizeof(struct in6_addr))
2431 /* IP6MRA_CREPORT_PKT */
2432 + nla_total_size(payloadlen)
2433 ;
2434
2435 return len;
2436}
2437
b70432f7 2438static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
dd12d15c
JG
2439{
2440 struct net *net = read_pnet(&mrt->net);
2441 struct nlmsghdr *nlh;
2442 struct rtgenmsg *rtgenm;
2443 struct mrt6msg *msg;
2444 struct sk_buff *skb;
2445 struct nlattr *nla;
2446 int payloadlen;
2447
2448 payloadlen = pkt->len - sizeof(struct mrt6msg);
2449 msg = (struct mrt6msg *)skb_transport_header(pkt);
2450
2451 skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2452 if (!skb)
2453 goto errout;
2454
2455 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2456 sizeof(struct rtgenmsg), 0);
2457 if (!nlh)
2458 goto errout;
2459 rtgenm = nlmsg_data(nlh);
2460 rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2461 if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2462 nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2463 nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2464 &msg->im6_src) ||
2465 nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2466 &msg->im6_dst))
2467 goto nla_put_failure;
2468
2469 nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2470 if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2471 nla_data(nla), payloadlen))
2472 goto nla_put_failure;
2473
2474 nlmsg_end(skb, nlh);
2475
2476 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2477 return;
2478
2479nla_put_failure:
2480 nlmsg_cancel(skb, nlh);
2481errout:
2482 kfree_skb(skb);
2483 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2484}
2485
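mrt6msg_netlink_event() above mirrors each MRT6MSG_* upcall to the RTNLGRP_IPV6_MROUTE_R rtnetlink group as an RTM_NEWCACHEREPORT message, so a monitoring process does not need to own the mroute socket to observe cache reports. A minimal illustrative sketch (not kernel code, assumes <sys/socket.h>, <linux/netlink.h> and <linux/rtnetlink.h>) of joining that group:

	int grp = RTNLGRP_IPV6_MROUTE_R;
	int nl = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	setsockopt(nl, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &grp, sizeof(grp));
	/* recvmsg() on nl now yields RTM_NEWCACHEREPORT messages carrying the
	 * IP6MRA_CREPORT_* attributes filled in above. */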
5b285cac
PM
2486static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2487{
e8ba330a 2488 const struct nlmsghdr *nlh = cb->nlh;
4724676d 2489 struct fib_dump_filter filter = {};
cb167893 2490 int err;
e8ba330a
DA
2491
2492 if (cb->strict_check) {
4724676d 2493 err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
effe6792 2494 &filter, cb);
e8ba330a
DA
2495 if (err < 0)
2496 return err;
2497 }
2498
cb167893
DA
2499 if (filter.table_id) {
2500 struct mr_table *mrt;
2501
2502 mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
2503 if (!mrt) {
ae677bbb
DA
2504 if (filter.dump_all_families)
2505 return skb->len;
2506
cb167893
DA
2507 NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
2508 return -ENOENT;
2509 }
2510 err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
2511 &mfc_unres_lock, &filter);
2512 return skb->len ? : err;
2513 }
2514
7b0db857 2515 return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
cb167893 2516 _ip6mr_fill_mroute, &mfc_unres_lock, &filter);
5b285cac 2517}