/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/rhashtable.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
#include <net/switchdev.h>

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __ro_after_init;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv4.mr_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv4.mr_tables)
		return NULL;
	return ret;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

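/* Map a matching fib rule to an mr_table. Policy routing of multicast
 * reuses the unicast fib-rules machinery: FR_ACT_TO_TBL selects a table,
 * while the other rule actions translate to the usual lookup errors.
 */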
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ipmr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb,
			       struct netlink_ext_ack *extack)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR);
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
}
EXPORT_SYMBOL(ipmr_rule_default);
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_mr_table_iter(struct net *net,
					   struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv4.mrt;
	return NULL;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return 0;
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
	return 0;
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
	return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif

static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	struct mfc_cache *c = (struct mfc_cache *)ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.nelem_hint = 3,
	.locks_mul = 1,
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
};

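/* Note: ipmr_hash_cmp() returns nonzero on mismatch, so the rhashtable
 * treats two entries as equal only when both group and origin match,
 * i.e. resolved MFC entries are keyed on the (S,G) pair held in
 * struct mfc_cache_cmp_arg.
 */
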
static void ipmr_new_table_set(struct mr_table *mrt,
			       struct net *net)
{
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
}

static struct mfc_cache_cmp_arg ipmr_mr_table_ops_cmparg_any = {
	.mfc_mcastgrp = htonl(INADDR_ANY),
	.mfc_origin = htonl(INADDR_ANY),
};

static struct mr_table_ops ipmr_mr_table_ops = {
	.rht_params = &ipmr_rht_params,
	.cmparg_any = &ipmr_mr_table_ops_cmparg_any,
};

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ipmr_mr_table_ops,
			      ipmr_expire_process, ipmr_new_table_set);
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

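/* ipmr_free_table() stops the expire timer before wiping the table, so
 * the timer callback cannot walk mfc_unres_queue while the entries are
 * being destroyed underneath it.
 */
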
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif

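/* __pim_rcv() above strips the outer PIM REGISTER encapsulation and
 * reinjects the inner multicast packet via the pimreg device, so it
 * re-enters the stack as if it had been received on that interface.
 */
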
static int call_ipmr_vif_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct vif_device *vif,
					 vifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv4.ipmr_seq);
}

static int call_ipmr_mfc_entry_notifiers(struct net *net,
					 enum fib_event_type event_type,
					 struct mfc_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IPMR, event_type,
				     &mfc->_c, tb_id, &net->ipv4.ipmr_seq);
}

/**
 * vif_delete - Delete a VIF entry
 * @notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct net *net = read_pnet(&mrt->net);
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, vifi,
					      mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

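/* When the highest-numbered vif goes away, vif_delete() above walks
 * backwards to the next live entry so that mrt->maxvif stays a tight
 * upper bound for the vif scan loops elsewhere in this file.
 */
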
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc_cache *)c);
}

static void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->_c.rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
	struct mr_mfc *c, *next;
	unsigned long expires;
	unsigned long now;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10 * HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}

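/* A TTL of 0 or 255 leaves a vif out of the forwarding set for this
 * entry; the minvif/maxvif recorded above merely bound the range the
 * forwarding path has to scan.
 */
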
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct switchdev_attr attr = {
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
				    dev->ifindex, &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit,
			vifc->vifc_threshold,
			vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
			(VIFF_TUNNEL | VIFF_REGISTER));

	attr.orig_dev = dev;
	if (!switchdev_port_attr_get(dev, &attr)) {
		memcpy(v->dev_parent_id.id, attr.u.ppid.id, attr.u.ppid.id_len);
		v->dev_parent_id.id_len = attr.u.ppid.id_len;
	} else {
		v->dev_parent_id.id_len = 0;
	}

	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
	return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin
	};

	return mr_mfc_find(mrt, &arg);
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = htonl(INADDR_ANY)
	};

	if (mcastgrp == htonl(INADDR_ANY))
		return mr_mfc_find_any_parent(mrt, vifi);
	return mr_mfc_find_any(mrt, vifi, &arg);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}

/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->_c.mfc_un.res.minvif = MAXVIFS;
		c->_c.free = ipmr_cache_free_rcu;
		refcount_set(&c->_c.mfc_un.res.refcount, 1);
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
		c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	}
	return c;
}

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
		}
	}
}

/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = assert;
		msg->im_mbz = 0;
		if (assert == IGMPMSG_WRVIFWHOLE)
			msg->im_vif = vifi;
		else
			msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	igmpmsg_netlink_event(mrt, skb);

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

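/* Two upcall layouts leave ipmr_cache_report(): WHOLEPKT/WRVIFWHOLE keeps
 * the full packet and only rewrites its IP header into a struct igmpmsg,
 * while the other asserts build a fresh skb carrying the copied IP header
 * plus a pseudo IGMP header for mrouted to parse.
 */
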
/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);

		if (err < 0) {
			/* If the report failed throw the cache entry
			 * out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer,
				  c->_c.mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

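/* Note the two caps enforced above: at most ten unresolved entries per
 * table, and at most four skbs queued on each entry (qlen > 3 drops).
 */
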
/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	struct net *net = read_pnet(&mrt->net);
	struct mfc_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
	list_del_rcu(&c->_c.list);
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);

	return 0;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	struct mfc_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int ret;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
					      mrt->id);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->_c.mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ipmr_rht_params);
	if (ret) {
		pr_err("ipmr: rhtable insert error %d\n", ret);
		ipmr_cache_free(c);
		return ret;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc_cache *)_uc;
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

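/* If the new route matched a queued unresolved entry, the skbs held on
 * it are replayed through ipmr_cache_resolve() above before the
 * placeholder entry is freed.
 */
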
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	struct net *net = read_pnet(&mrt->net);
	struct mr_mfc *c, *tmp;
	struct mfc_cache *cache;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
			continue;
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
		list_del_rcu(&c->list);
		cache = (struct mfc_cache *)c;
		call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
					      mrt->id);
		mroute_netlink_event(mrt, cache, RTM_DELROUTE);
		mr_cache_put(c);
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			cache = (struct mfc_cache *)c;
			mroute_netlink_event(mrt, cache, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, cache);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	rtnl_lock();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
		}
	}
	rtnl_unlock();
}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	bool do_wrvifwhole;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			/* We need to unlock here because mrtsock_destruct takes
			 * care of rtnl itself and we can't change that due to
			 * the IP_ROUTER_ALERT setsockopt which runs without it.
			 */
			rtnl_unlock();
			ret = ip_ra_control(sk, 0, NULL);
			goto out;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
			mrt->mroute_do_wrvifwhole = do_wrvifwhole;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
out:
	return ret;
}

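/* Note on MRT_TABLE above: once a socket has become the mroute socket
 * via MRT_INIT it cannot be repointed at another table (-EBUSY), so a
 * daemon must pick its table first. Roughly, in userspace (an
 * illustrative sketch, not kernel code):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	uint32_t tbl = 42;
 *	setsockopt(s, IPPROTO_IP, MRT_TABLE, &tbl, sizeof(tbl));
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 */
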
/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

1da177e4
LT
1596int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1597{
1598 struct sioc_sg_req sr;
1599 struct sioc_vif_req vr;
1600 struct vif_device *vif;
1601 struct mfc_cache *c;
4feb88e5 1602 struct net *net = sock_net(sk);
f0ad0860
PM
1603 struct mr_table *mrt;
1604
1605 mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
51456b29 1606 if (!mrt)
f0ad0860 1607 return -ENOENT;
e905a9ed 1608
132adf54
SH
1609 switch (cmd) {
1610 case SIOCGETVIFCNT:
c354e124 1611 if (copy_from_user(&vr, arg, sizeof(vr)))
132adf54 1612 return -EFAULT;
0c12295a 1613 if (vr.vifi >= mrt->maxvif)
132adf54
SH
1614 return -EINVAL;
1615 read_lock(&mrt_lock);
0c12295a
PM
1616 vif = &mrt->vif_table[vr.vifi];
1617 if (VIF_EXISTS(mrt, vr.vifi)) {
c354e124
JK
1618 vr.icount = vif->pkt_in;
1619 vr.ocount = vif->pkt_out;
1620 vr.ibytes = vif->bytes_in;
1621 vr.obytes = vif->bytes_out;
1da177e4 1622 read_unlock(&mrt_lock);
1da177e4 1623
c354e124 1624 if (copy_to_user(arg, &vr, sizeof(vr)))
132adf54
SH
1625 return -EFAULT;
1626 return 0;
1627 }
1628 read_unlock(&mrt_lock);
1629 return -EADDRNOTAVAIL;
1630 case SIOCGETSGCNT:
c354e124 1631 if (copy_from_user(&sr, arg, sizeof(sr)))
132adf54
SH
1632 return -EFAULT;
1633
a8c9486b 1634 rcu_read_lock();
0c12295a 1635 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
132adf54 1636 if (c) {
494fff56
YM
1637 sr.pktcnt = c->_c.mfc_un.res.pkt;
1638 sr.bytecnt = c->_c.mfc_un.res.bytes;
1639 sr.wrong_if = c->_c.mfc_un.res.wrong_if;
a8c9486b 1640 rcu_read_unlock();
132adf54 1641
c354e124 1642 if (copy_to_user(arg, &sr, sizeof(sr)))
132adf54
SH
1643 return -EFAULT;
1644 return 0;
1645 }
a8c9486b 1646 rcu_read_unlock();
132adf54
SH
1647 return -EADDRNOTAVAIL;
1648 default:
1649 return -ENOIOCTLCMD;
1da177e4
LT
1650 }
1651}
1652
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

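/* The outer IPIP header built above inherits tos and ttl from the inner
 * header; fragmentation concerns are handled by the caller, which
 * refuses to forward over-MTU packets with DF set.
 */
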
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}

a5bc9294
YG
1798#ifdef CONFIG_NET_SWITCHDEV
1799static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1800 int in_vifi, int out_vifi)
1801{
1802 struct vif_device *out_vif = &mrt->vif_table[out_vifi];
1803 struct vif_device *in_vif = &mrt->vif_table[in_vifi];
1804
1805 if (!skb->offload_mr_fwd_mark)
1806 return false;
1807 if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
1808 return false;
1809 return netdev_phys_item_id_same(&out_vif->dev_parent_id,
1810 &in_vif->dev_parent_id);
1811}
1812#else
1813static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1814 int in_vifi, int out_vifi)
1815{
1816 return false;
1817}
1818#endif
1819
7ef8f65d 1820/* Processing handlers for ipmr_forward */
1da177e4 1821
0c12295a 1822static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
a5bc9294
YG
1823 int in_vifi, struct sk_buff *skb,
1824 struct mfc_cache *c, int vifi)
1da177e4 1825{
eddc9ec5 1826 const struct iphdr *iph = ip_hdr(skb);
0c12295a 1827 struct vif_device *vif = &mrt->vif_table[vifi];
1da177e4
LT
1828 struct net_device *dev;
1829 struct rtable *rt;
31e4543d 1830 struct flowi4 fl4;
1da177e4
LT
1831 int encap = 0;
1832
51456b29 1833 if (!vif->dev)
1da177e4
LT
1834 goto out_free;
1835
1da177e4
LT
1836 if (vif->flags & VIFF_REGISTER) {
1837 vif->pkt_out++;
c354e124 1838 vif->bytes_out += skb->len;
cf3677ae
PE
1839 vif->dev->stats.tx_bytes += skb->len;
1840 vif->dev->stats.tx_packets++;
0c12295a 1841 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
69ebbf58 1842 goto out_free;
1da177e4 1843 }
1da177e4 1844
a5bc9294
YG
1845 if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
1846 goto out_free;
1847
a8cb16dd 1848 if (vif->flags & VIFF_TUNNEL) {
31e4543d 1849 rt = ip_route_output_ports(net, &fl4, NULL,
78fbfd8a
DM
1850 vif->remote, vif->local,
1851 0, 0,
1852 IPPROTO_IPIP,
1853 RT_TOS(iph->tos), vif->link);
b23dd4fe 1854 if (IS_ERR(rt))
1da177e4
LT
1855 goto out_free;
1856 encap = sizeof(struct iphdr);
1857 } else {
31e4543d 1858 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
78fbfd8a
DM
1859 0, 0,
1860 IPPROTO_IPIP,
1861 RT_TOS(iph->tos), vif->link);
b23dd4fe 1862 if (IS_ERR(rt))
1da177e4
LT
1863 goto out_free;
1864 }
1865
d8d1f30b 1866 dev = rt->dst.dev;
1da177e4 1867
d8d1f30b 1868 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1da177e4 1869 /* Do not fragment multicasts. Alas, IPv4 does not
a8cb16dd
ED
 1870 * allow us to send an ICMP error, so such packets
 1871 * will disappear into a blackhole.
1da177e4 1872 */
73186df8 1873 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1da177e4
LT
1874 ip_rt_put(rt);
1875 goto out_free;
1876 }
1877
d8d1f30b 1878 encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
1da177e4
LT
1879
1880 if (skb_cow(skb, encap)) {
e905a9ed 1881 ip_rt_put(rt);
1da177e4
LT
1882 goto out_free;
1883 }
1884
1885 vif->pkt_out++;
c354e124 1886 vif->bytes_out += skb->len;
1da177e4 1887
adf30907 1888 skb_dst_drop(skb);
d8d1f30b 1889 skb_dst_set(skb, &rt->dst);
eddc9ec5 1890 ip_decrease_ttl(ip_hdr(skb));
1da177e4
LT
1891
1892 /* FIXME: forward and output firewalls used to be called here.
a8cb16dd
ED
1893 * What do we do with netfilter? -- RR
1894 */
1da177e4 1895 if (vif->flags & VIFF_TUNNEL) {
b6a7719a 1896 ip_encap(net, skb, vif->local, vif->remote);
1da177e4 1897 /* FIXME: extra output firewall step used to be here. --RR */
2f4c02d4
PE
1898 vif->dev->stats.tx_packets++;
1899 vif->dev->stats.tx_bytes += skb->len;
1da177e4
LT
1900 }
1901
9ee6c5dc 1902 IPCB(skb)->flags |= IPSKB_FORWARDED;
1da177e4 1903
7ef8f65d 1904 /* RFC1584 teaches that a DVMRP/PIM router must deliver packets locally
1da177e4
LT
 1905 * not only before forwarding, but also after forwarding on all output
 1906 * interfaces. It is clear that if the mrouter runs a multicasting
 1907 * program, the program should receive packets regardless of which
 1908 * interface it joined on.
 1909 * If we do not do this, the program would have to join on all
 1910 * interfaces. On the other hand, a multihomed host (or router, but
 1911 * not mrouter) cannot join on more than one interface - that would
 1912 * result in receiving multiple packets.
1913 */
29a26a56
EB
1914 NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
1915 net, NULL, skb, skb->dev, dev,
1da177e4
LT
1916 ipmr_forward_finish);
1917 return;
1918
1919out_free:
1920 kfree_skb(skb);
1da177e4
LT
1921}
1922
0c12295a 1923static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1da177e4
LT
1924{
1925 int ct;
0c12295a
PM
1926
1927 for (ct = mrt->maxvif-1; ct >= 0; ct--) {
1928 if (mrt->vif_table[ct].dev == dev)
1da177e4
LT
1929 break;
1930 }
1931 return ct;
1932}
1933
1934/* "local" means that we should preserve one skb (for local delivery) */
c4854ec8 1935static void ip_mr_forward(struct net *net, struct mr_table *mrt,
4b1f0d33 1936 struct net_device *dev, struct sk_buff *skb,
494fff56 1937 struct mfc_cache *c, int local)
1da177e4 1938{
4b1f0d33 1939 int true_vifi = ipmr_find_vif(mrt, dev);
1da177e4
LT
1940 int psend = -1;
1941 int vif, ct;
1942
494fff56
YM
1943 vif = c->_c.mfc_parent;
1944 c->_c.mfc_un.res.pkt++;
1945 c->_c.mfc_un.res.bytes += skb->len;
1946 c->_c.mfc_un.res.lastuse = jiffies;
1da177e4 1947
494fff56 1948 if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
660b26dc
ND
1949 struct mfc_cache *cache_proxy;
1950
 1951 /* For an (*,G) entry, we only check that the incoming
1952 * interface is part of the static tree.
1953 */
845c9a7a 1954 cache_proxy = mr_mfc_find_any_parent(mrt, vif);
660b26dc 1955 if (cache_proxy &&
494fff56 1956 cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255)
660b26dc
ND
1957 goto forward;
1958 }
1959
7ef8f65d 1960 /* Wrong interface: drop packet and (maybe) send PIM assert. */
4b1f0d33 1961 if (mrt->vif_table[vif].dev != dev) {
c7537967 1962 if (rt_is_output_route(skb_rtable(skb))) {
1da177e4 1963 /* It is our own packet, looped back.
a8cb16dd
ED
1964 * Very complicated situation...
1965 *
 1966 * The best workaround until routing daemons are
 1967 * fixed is not to redistribute a packet that was
 1968 * sent through the wrong interface. It means that
 1969 * multicast applications WILL NOT work for
 1970 * (S,G) entries whose default multicast route points
 1971 * to the wrong oif. In any case, it is not a good
 1972 * idea to run multicasting applications on a router.
1da177e4
LT
1973 */
1974 goto dont_forward;
1975 }
1976
494fff56 1977 c->_c.mfc_un.res.wrong_if++;
1da177e4 1978
0c12295a 1979 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1da177e4 1980 /* PIM-SM uses asserts when switching from RPT to SPT,
a8cb16dd
ED
 1981 * so we cannot check that the packet arrived on an oif.
 1982 * It is bad, but otherwise we would need to move a pretty
 1983 * large chunk of pimd into the kernel. Ough... --ANK
1da177e4 1984 */
0c12295a 1985 (mrt->mroute_do_pim ||
494fff56 1986 c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
e905a9ed 1987 time_after(jiffies,
494fff56
YM
1988 c->_c.mfc_un.res.last_assert +
1989 MFC_ASSERT_THRESH)) {
1990 c->_c.mfc_un.res.last_assert = jiffies;
0c12295a 1991 ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
c921c207
NA
1992 if (mrt->mroute_do_wrvifwhole)
1993 ipmr_cache_report(mrt, skb, true_vifi,
1994 IGMPMSG_WRVIFWHOLE);
1da177e4
LT
1995 }
1996 goto dont_forward;
1997 }
1998
660b26dc 1999forward:
0c12295a
PM
2000 mrt->vif_table[vif].pkt_in++;
2001 mrt->vif_table[vif].bytes_in += skb->len;
1da177e4 2002
7ef8f65d 2003 /* Forward the frame */
494fff56
YM
2004 if (c->mfc_origin == htonl(INADDR_ANY) &&
2005 c->mfc_mcastgrp == htonl(INADDR_ANY)) {
660b26dc 2006 if (true_vifi >= 0 &&
494fff56 2007 true_vifi != c->_c.mfc_parent &&
660b26dc 2008 ip_hdr(skb)->ttl >
494fff56 2009 c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
660b26dc
ND
2010 /* It's an (*,*) entry and the packet is not coming from
2011 * the upstream: forward the packet to the upstream
2012 * only.
2013 */
494fff56 2014 psend = c->_c.mfc_parent;
660b26dc
ND
2015 goto last_forward;
2016 }
2017 goto dont_forward;
2018 }
494fff56
YM
2019 for (ct = c->_c.mfc_un.res.maxvif - 1;
2020 ct >= c->_c.mfc_un.res.minvif; ct--) {
660b26dc 2021 /* For (*,G) entry, don't forward to the incoming interface */
494fff56 2022 if ((c->mfc_origin != htonl(INADDR_ANY) ||
360eb5da 2023 ct != true_vifi) &&
494fff56 2024 ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
1da177e4
LT
2025 if (psend != -1) {
2026 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
a8cb16dd 2027
1da177e4 2028 if (skb2)
a5bc9294 2029 ipmr_queue_xmit(net, mrt, true_vifi,
494fff56 2030 skb2, c, psend);
1da177e4 2031 }
c354e124 2032 psend = ct;
1da177e4
LT
2033 }
2034 }
660b26dc 2035last_forward:
1da177e4
LT
2036 if (psend != -1) {
2037 if (local) {
2038 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
a8cb16dd 2039
1da177e4 2040 if (skb2)
a5bc9294 2041 ipmr_queue_xmit(net, mrt, true_vifi, skb2,
494fff56 2042 c, psend);
1da177e4 2043 } else {
494fff56 2044 ipmr_queue_xmit(net, mrt, true_vifi, skb, c, psend);
c4854ec8 2045 return;
1da177e4
LT
2046 }
2047 }
2048
2049dont_forward:
2050 if (!local)
2051 kfree_skb(skb);
1da177e4
LT
2052}
2053
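The ttls[] thresholds consulted in the forwarding loop above come from userspace: MRT_ADD_MFC installs an (S,G) entry whose mfcc_ttls[] holds one threshold per VIF, with 255 meaning "never forward". A hedged sketch; the parent VIF and per-VIF thresholds are illustrative.

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>
	#include <linux/mroute.h>

	/* Sketch only: a copy goes out on VIF n iff packet TTL > mfcc_ttls[n]. */
	static int add_mfc(int mroute_fd, const char *src, const char *grp,
			   vifi_t parent)
	{
		struct mfcctl mc;

		memset(&mc, 0, sizeof(mc));
		inet_pton(AF_INET, src, &mc.mfcc_origin);
		inet_pton(AF_INET, grp, &mc.mfcc_mcastgrp);
		mc.mfcc_parent = parent;			 /* expected iif */
		memset(mc.mfcc_ttls, 255, sizeof(mc.mfcc_ttls)); /* 255 = never */
		mc.mfcc_ttls[1] = 1;				 /* illustrative */

		return setsockopt(mroute_fd, IPPROTO_IP, MRT_ADD_MFC,
				  &mc, sizeof(mc));
	}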
417da66f 2054static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
ee3f1aaf 2055{
417da66f
DM
2056 struct rtable *rt = skb_rtable(skb);
2057 struct iphdr *iph = ip_hdr(skb);
da91981b 2058 struct flowi4 fl4 = {
417da66f
DM
2059 .daddr = iph->daddr,
2060 .saddr = iph->saddr,
b0fe4a31 2061 .flowi4_tos = RT_TOS(iph->tos),
4fd551d7
DM
2062 .flowi4_oif = (rt_is_output_route(rt) ?
2063 skb->dev->ifindex : 0),
2064 .flowi4_iif = (rt_is_output_route(rt) ?
1fb9489b 2065 LOOPBACK_IFINDEX :
4fd551d7 2066 skb->dev->ifindex),
b4869889 2067 .flowi4_mark = skb->mark,
ee3f1aaf
DM
2068 };
2069 struct mr_table *mrt;
2070 int err;
2071
da91981b 2072 err = ipmr_fib_lookup(net, &fl4, &mrt);
ee3f1aaf
DM
2073 if (err)
2074 return ERR_PTR(err);
2075 return mrt;
2076}
1da177e4 2077
7ef8f65d
NA
 2078/* Multicast packets for forwarding arrive here.
2079 * Called with rcu_read_lock();
1da177e4 2080 */
1da177e4
LT
2081int ip_mr_input(struct sk_buff *skb)
2082{
2083 struct mfc_cache *cache;
4feb88e5 2084 struct net *net = dev_net(skb->dev);
511c3f92 2085 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
f0ad0860 2086 struct mr_table *mrt;
bcfc7d33
TW
2087 struct net_device *dev;
2088
2089 /* skb->dev passed in is the loX master dev for vrfs.
2090 * As there are no vifs associated with loopback devices,
2091 * get the proper interface that does have a vif associated with it.
2092 */
2093 dev = skb->dev;
2094 if (netif_is_l3_master(skb->dev)) {
2095 dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
2096 if (!dev) {
2097 kfree_skb(skb);
2098 return -ENODEV;
2099 }
2100 }
1da177e4
LT
2101
 2102 /* A packet looped back after forwarding should not be
a8cb16dd 2103 * forwarded a second time, but it can still be delivered locally.
1da177e4 2104 */
4c968709 2105 if (IPCB(skb)->flags & IPSKB_FORWARDED)
1da177e4
LT
2106 goto dont_forward;
2107
417da66f 2108 mrt = ipmr_rt_fib_lookup(net, skb);
ee3f1aaf
DM
2109 if (IS_ERR(mrt)) {
2110 kfree_skb(skb);
2111 return PTR_ERR(mrt);
e40dbc51 2112 }
1da177e4 2113 if (!local) {
4c968709
ED
2114 if (IPCB(skb)->opt.router_alert) {
2115 if (ip_call_ra_chain(skb))
2116 return 0;
2117 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
 2118 /* IGMPv1 (and broken IGMPv2 implementations such as
 2119 * Cisco IOS <= 11.2(8)) do not put the router alert
 2120 * option into IGMP packets destined to routable
 2121 * groups. It is very bad, because it means
 2122 * that we can forward NO IGMP messages.
2123 */
2124 struct sock *mroute_sk;
2125
2126 mroute_sk = rcu_dereference(mrt->mroute_sk);
2127 if (mroute_sk) {
2128 nf_reset(skb);
2129 raw_rcv(mroute_sk, skb);
2130 return 0;
2131 }
1da177e4
LT
2132 }
2133 }
2134
a8c9486b 2135 /* already under rcu_read_lock() */
0c12295a 2136 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
51456b29 2137 if (!cache) {
bcfc7d33 2138 int vif = ipmr_find_vif(mrt, dev);
660b26dc
ND
2139
2140 if (vif >= 0)
2141 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2142 vif);
2143 }
1da177e4 2144
7ef8f65d 2145 /* No usable cache entry */
51456b29 2146 if (!cache) {
1da177e4
LT
2147 int vif;
2148
2149 if (local) {
2150 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2151 ip_local_deliver(skb);
51456b29 2152 if (!skb2)
1da177e4 2153 return -ENOBUFS;
1da177e4
LT
2154 skb = skb2;
2155 }
2156
a8c9486b 2157 read_lock(&mrt_lock);
bcfc7d33 2158 vif = ipmr_find_vif(mrt, dev);
1da177e4 2159 if (vif >= 0) {
4b1f0d33 2160 int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
1da177e4
LT
2161 read_unlock(&mrt_lock);
2162
0eae88f3 2163 return err2;
1da177e4
LT
2164 }
2165 read_unlock(&mrt_lock);
2166 kfree_skb(skb);
2167 return -ENODEV;
2168 }
2169
a8c9486b 2170 read_lock(&mrt_lock);
4b1f0d33 2171 ip_mr_forward(net, mrt, dev, skb, cache, local);
1da177e4
LT
2172 read_unlock(&mrt_lock);
2173
2174 if (local)
2175 return ip_local_deliver(skb);
2176
2177 return 0;
2178
2179dont_forward:
2180 if (local)
2181 return ip_local_deliver(skb);
2182 kfree_skb(skb);
2183 return 0;
2184}
2185
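When ip_mr_input() finds no cache entry, ipmr_cache_unresolved() queues the skb and sends an IGMPMSG_NOCACHE upcall to the mroute socket, disguised as an IGMP packet whose head is really a struct igmpmsg. A hedged sketch of the daemon side of that upcall:

	#include <stdio.h>
	#include <unistd.h>
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <linux/mroute.h>

	/* Sketch only: drain one upcall from the mroute socket. */
	static void read_upcall(int mroute_fd)
	{
		char buf[2048];
		ssize_t n = read(mroute_fd, buf, sizeof(buf));
		struct igmpmsg *msg = (struct igmpmsg *)buf;

		if (n >= (ssize_t)sizeof(*msg) &&
		    msg->im_msgtype == IGMPMSG_NOCACHE)
			printf("no route: vif %u, src %s\n", msg->im_vif,
			       inet_ntoa(msg->im_src)); /* resolve, then MRT_ADD_MFC */
	}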
b1879204 2186#ifdef CONFIG_IP_PIMSM_V1
7ef8f65d 2187/* Handle IGMP messages of PIMv1 */
a8cb16dd 2188int pim_rcv_v1(struct sk_buff *skb)
b1879204
IJ
2189{
2190 struct igmphdr *pim;
4feb88e5 2191 struct net *net = dev_net(skb->dev);
f0ad0860 2192 struct mr_table *mrt;
b1879204
IJ
2193
2194 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2195 goto drop;
2196
2197 pim = igmp_hdr(skb);
2198
417da66f 2199 mrt = ipmr_rt_fib_lookup(net, skb);
ee3f1aaf
DM
2200 if (IS_ERR(mrt))
2201 goto drop;
0c12295a 2202 if (!mrt->mroute_do_pim ||
b1879204
IJ
2203 pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
2204 goto drop;
2205
f0ad0860 2206 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
b1879204
IJ
2207drop:
2208 kfree_skb(skb);
2209 }
1da177e4
LT
2210 return 0;
2211}
2212#endif
2213
2214#ifdef CONFIG_IP_PIMSM_V2
a8cb16dd 2215static int pim_rcv(struct sk_buff *skb)
1da177e4
LT
2216{
2217 struct pimreghdr *pim;
f0ad0860
PM
2218 struct net *net = dev_net(skb->dev);
2219 struct mr_table *mrt;
1da177e4 2220
b1879204 2221 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
1da177e4
LT
2222 goto drop;
2223
9c70220b 2224 pim = (struct pimreghdr *)skb_transport_header(skb);
56245cae 2225 if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
a8cb16dd 2226 (pim->flags & PIM_NULL_REGISTER) ||
e905a9ed 2227 (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
d3bc23e7 2228 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
1da177e4
LT
2229 goto drop;
2230
417da66f 2231 mrt = ipmr_rt_fib_lookup(net, skb);
ee3f1aaf
DM
2232 if (IS_ERR(mrt))
2233 goto drop;
f0ad0860 2234 if (__pim_rcv(mrt, skb, sizeof(*pim))) {
b1879204
IJ
2235drop:
2236 kfree_skb(skb);
2237 }
1da177e4
LT
2238 return 0;
2239}
2240#endif
2241
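Neither PIM receive path above is active by default: pim_rcv_v1() checks mroute_do_pim, which the daemon flips with the MRT_PIM socket option, and MRT_ASSERT likewise gates the WRONGVIF asserts in ip_mr_forward(). A hedged sketch, assuming mroute_fd is the daemon's mroute socket:

	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <linux/mroute.h>

	/* Sketch only: enable assert generation and PIM register handling. */
	static int enable_pim(int mroute_fd)
	{
		int on = 1;

		if (setsockopt(mroute_fd, IPPROTO_IP, MRT_ASSERT,
			       &on, sizeof(on)) < 0)
			return -1;
		return setsockopt(mroute_fd, IPPROTO_IP, MRT_PIM,
				  &on, sizeof(on));
	}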
9a1b9496
DM
2242int ipmr_get_route(struct net *net, struct sk_buff *skb,
2243 __be32 saddr, __be32 daddr,
9f09eaea 2244 struct rtmsg *rtm, u32 portid)
1da177e4 2245{
1da177e4 2246 struct mfc_cache *cache;
9a1b9496
DM
2247 struct mr_table *mrt;
2248 int err;
1da177e4 2249
f0ad0860 2250 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
51456b29 2251 if (!mrt)
f0ad0860
PM
2252 return -ENOENT;
2253
a8c9486b 2254 rcu_read_lock();
9a1b9496 2255 cache = ipmr_cache_find(mrt, saddr, daddr);
51456b29 2256 if (!cache && skb->dev) {
660b26dc 2257 int vif = ipmr_find_vif(mrt, skb->dev);
1da177e4 2258
660b26dc
ND
2259 if (vif >= 0)
2260 cache = ipmr_cache_find_any(mrt, daddr, vif);
2261 }
51456b29 2262 if (!cache) {
72287490 2263 struct sk_buff *skb2;
eddc9ec5 2264 struct iphdr *iph;
1da177e4 2265 struct net_device *dev;
a8cb16dd 2266 int vif = -1;
1da177e4 2267
1da177e4 2268 dev = skb->dev;
a8c9486b 2269 read_lock(&mrt_lock);
a8cb16dd
ED
2270 if (dev)
2271 vif = ipmr_find_vif(mrt, dev);
2272 if (vif < 0) {
1da177e4 2273 read_unlock(&mrt_lock);
a8c9486b 2274 rcu_read_unlock();
1da177e4
LT
2275 return -ENODEV;
2276 }
72287490
AK
2277 skb2 = skb_clone(skb, GFP_ATOMIC);
2278 if (!skb2) {
2279 read_unlock(&mrt_lock);
a8c9486b 2280 rcu_read_unlock();
72287490
AK
2281 return -ENOMEM;
2282 }
2283
2cf75070 2284 NETLINK_CB(skb2).portid = portid;
e2d1bca7
ACM
2285 skb_push(skb2, sizeof(struct iphdr));
2286 skb_reset_network_header(skb2);
eddc9ec5
ACM
2287 iph = ip_hdr(skb2);
2288 iph->ihl = sizeof(struct iphdr) >> 2;
9a1b9496
DM
2289 iph->saddr = saddr;
2290 iph->daddr = daddr;
eddc9ec5 2291 iph->version = 0;
4b1f0d33 2292 err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
1da177e4 2293 read_unlock(&mrt_lock);
a8c9486b 2294 rcu_read_unlock();
1da177e4
LT
2295 return err;
2296 }
2297
a8c9486b 2298 read_lock(&mrt_lock);
7b0db857 2299 err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
1da177e4 2300 read_unlock(&mrt_lock);
a8c9486b 2301 rcu_read_unlock();
1da177e4
LT
2302 return err;
2303}
2304
cb6a4e46 2305static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
65886f43
ND
2306 u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2307 int flags)
cb6a4e46
PM
2308{
2309 struct nlmsghdr *nlh;
2310 struct rtmsg *rtm;
1eb99af5 2311 int err;
cb6a4e46 2312
65886f43 2313 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
51456b29 2314 if (!nlh)
cb6a4e46
PM
2315 return -EMSGSIZE;
2316
2317 rtm = nlmsg_data(nlh);
2318 rtm->rtm_family = RTNL_FAMILY_IPMR;
2319 rtm->rtm_dst_len = 32;
2320 rtm->rtm_src_len = 32;
2321 rtm->rtm_tos = 0;
2322 rtm->rtm_table = mrt->id;
f3756b79
DM
2323 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2324 goto nla_put_failure;
cb6a4e46
PM
2325 rtm->rtm_type = RTN_MULTICAST;
2326 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
494fff56 2327 if (c->_c.mfc_flags & MFC_STATIC)
9a68ac72
ND
2328 rtm->rtm_protocol = RTPROT_STATIC;
2329 else
2330 rtm->rtm_protocol = RTPROT_MROUTED;
cb6a4e46
PM
2331 rtm->rtm_flags = 0;
2332
930345ea
JB
2333 if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2334 nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
f3756b79 2335 goto nla_put_failure;
7b0db857 2336 err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
1eb99af5
ND
2337 /* do not break the dump if cache is unresolved */
2338 if (err < 0 && err != -ENOENT)
cb6a4e46
PM
2339 goto nla_put_failure;
2340
053c095a
JB
2341 nlmsg_end(skb, nlh);
2342 return 0;
cb6a4e46
PM
2343
2344nla_put_failure:
2345 nlmsg_cancel(skb, nlh);
2346 return -EMSGSIZE;
2347}
2348
7b0db857
YM
2349static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2350 u32 portid, u32 seq, struct mr_mfc *c, int cmd,
2351 int flags)
2352{
2353 return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
2354 cmd, flags);
2355}
2356
8cd3ac9f
ND
2357static size_t mroute_msgsize(bool unresolved, int maxvif)
2358{
2359 size_t len =
2360 NLMSG_ALIGN(sizeof(struct rtmsg))
2361 + nla_total_size(4) /* RTA_TABLE */
2362 + nla_total_size(4) /* RTA_SRC */
2363 + nla_total_size(4) /* RTA_DST */
2364 ;
2365
2366 if (!unresolved)
2367 len = len
2368 + nla_total_size(4) /* RTA_IIF */
2369 + nla_total_size(0) /* RTA_MULTIPATH */
2370 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2371 /* RTA_MFC_STATS */
a9a08042 2372 + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
8cd3ac9f
ND
2373 ;
2374
2375 return len;
2376}
2377
2378static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2379 int cmd)
2380{
2381 struct net *net = read_pnet(&mrt->net);
2382 struct sk_buff *skb;
2383 int err = -ENOBUFS;
2384
494fff56
YM
2385 skb = nlmsg_new(mroute_msgsize(mfc->_c.mfc_parent >= MAXVIFS,
2386 mrt->maxvif),
8cd3ac9f 2387 GFP_ATOMIC);
51456b29 2388 if (!skb)
8cd3ac9f
ND
2389 goto errout;
2390
65886f43 2391 err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
8cd3ac9f
ND
2392 if (err < 0)
2393 goto errout;
2394
2395 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2396 return;
2397
2398errout:
2399 kfree_skb(skb);
2400 if (err < 0)
2401 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2402}
2403
5a645dd8
JG
2404static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
2405{
2406 size_t len =
2407 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2408 + nla_total_size(1) /* IPMRA_CREPORT_MSGTYPE */
2409 + nla_total_size(4) /* IPMRA_CREPORT_VIF_ID */
2410 + nla_total_size(4) /* IPMRA_CREPORT_SRC_ADDR */
2411 + nla_total_size(4) /* IPMRA_CREPORT_DST_ADDR */
2412 /* IPMRA_CREPORT_PKT */
2413 + nla_total_size(payloadlen)
2414 ;
2415
2416 return len;
2417}
2418
2419static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
2420{
2421 struct net *net = read_pnet(&mrt->net);
2422 struct nlmsghdr *nlh;
2423 struct rtgenmsg *rtgenm;
2424 struct igmpmsg *msg;
2425 struct sk_buff *skb;
2426 struct nlattr *nla;
2427 int payloadlen;
2428
2429 payloadlen = pkt->len - sizeof(struct igmpmsg);
2430 msg = (struct igmpmsg *)skb_network_header(pkt);
2431
2432 skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2433 if (!skb)
2434 goto errout;
2435
2436 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2437 sizeof(struct rtgenmsg), 0);
2438 if (!nlh)
2439 goto errout;
2440 rtgenm = nlmsg_data(nlh);
2441 rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
2442 if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
2443 nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
2444 nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
2445 msg->im_src.s_addr) ||
2446 nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
2447 msg->im_dst.s_addr))
2448 goto nla_put_failure;
2449
2450 nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
2451 if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
2452 nla_data(nla), payloadlen))
2453 goto nla_put_failure;
2454
2455 nlmsg_end(skb, nlh);
2456
2457 rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
2458 return;
2459
2460nla_put_failure:
2461 nlmsg_cancel(skb, nlh);
2462errout:
2463 kfree_skb(skb);
2464 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
2465}
2466
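The RTM_NEWCACHEREPORT notifications built above are delivered to the RTNLGRP_IPV4_MROUTE_R multicast group. A hedged sketch of a monitoring process subscribing to it via NETLINK_ADD_MEMBERSHIP:

	#include <sys/socket.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>

	#ifndef SOL_NETLINK
	#define SOL_NETLINK 270		/* from <linux/socket.h> */
	#endif

	/* Sketch only: subscribe to IPv4 mroute cache reports. */
	static int open_cache_report_sock(void)
	{
		int grp = RTNLGRP_IPV4_MROUTE_R;
		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

		if (fd < 0)
			return -1;
		if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
			       &grp, sizeof(grp)) < 0)
			return -1;
		return fd;	/* recv() now yields RTM_NEWCACHEREPORT messages */
	}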
4f75ba69
DS
2467static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2468 struct netlink_ext_ack *extack)
2469{
2470 struct net *net = sock_net(in_skb->sk);
2471 struct nlattr *tb[RTA_MAX + 1];
2472 struct sk_buff *skb = NULL;
2473 struct mfc_cache *cache;
2474 struct mr_table *mrt;
2475 struct rtmsg *rtm;
2476 __be32 src, grp;
2477 u32 tableid;
2478 int err;
2479
2480 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
2481 rtm_ipv4_policy, extack);
2482 if (err < 0)
2483 goto errout;
2484
2485 rtm = nlmsg_data(nlh);
2486
2487 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
2488 grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
2489 tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
2490
2491 mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
2e3d232e
DC
2492 if (!mrt) {
2493 err = -ENOENT;
4f75ba69
DS
2494 goto errout_free;
2495 }
2496
2497 /* entries are added/deleted only under RTNL */
2498 rcu_read_lock();
2499 cache = ipmr_cache_find(mrt, src, grp);
2500 rcu_read_unlock();
2501 if (!cache) {
2502 err = -ENOENT;
2503 goto errout_free;
2504 }
2505
2506 skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
2507 if (!skb) {
2508 err = -ENOBUFS;
2509 goto errout_free;
2510 }
2511
2512 err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
2513 nlh->nlmsg_seq, cache,
2514 RTM_NEWROUTE, 0);
2515 if (err < 0)
2516 goto errout_free;
2517
2518 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2519
2520errout:
2521 return err;
2522
2523errout_free:
2524 kfree_skb(skb);
2525 goto errout;
2526}
2527
cb6a4e46
PM
2528static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2529{
4724676d 2530 struct fib_dump_filter filter = {};
cb167893 2531 int err;
4724676d 2532
e8ba330a 2533 if (cb->strict_check) {
4724676d 2534 err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
effe6792 2535 &filter, cb);
e8ba330a
DA
2536 if (err < 0)
2537 return err;
2538 }
2539
cb167893
DA
2540 if (filter.table_id) {
2541 struct mr_table *mrt;
2542
2543 mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
2544 if (!mrt) {
ae677bbb
DA
2545 if (filter.dump_all_families)
2546 return skb->len;
2547
cb167893
DA
2548 NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
2549 return -ENOENT;
2550 }
2551 err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
2552 &mfc_unres_lock, &filter);
2553 return skb->len ? : err;
2554 }
2555
7b0db857 2556 return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
cb167893 2557 _ipmr_fill_mroute, &mfc_unres_lock, &filter);
cb6a4e46
PM
2558}
2559
ccbb0aa6
NA
2560static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
2561 [RTA_SRC] = { .type = NLA_U32 },
2562 [RTA_DST] = { .type = NLA_U32 },
2563 [RTA_IIF] = { .type = NLA_U32 },
2564 [RTA_TABLE] = { .type = NLA_U32 },
2565 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2566};
2567
2568static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
2569{
2570 switch (rtm_protocol) {
2571 case RTPROT_STATIC:
2572 case RTPROT_MROUTED:
2573 return true;
2574 }
2575 return false;
2576}
2577
2578static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
2579{
2580 struct rtnexthop *rtnh = nla_data(nla);
2581 int remaining = nla_len(nla), vifi = 0;
2582
2583 while (rtnh_ok(rtnh, remaining)) {
2584 mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
2585 if (++vifi == MAXVIFS)
2586 break;
2587 rtnh = rtnh_next(rtnh, &remaining);
2588 }
2589
2590 return remaining > 0 ? -EINVAL : vifi;
2591}
2592
2593/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
2594static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
2595 struct mfcctl *mfcc, int *mrtsock,
c21ef3e3
DA
2596 struct mr_table **mrtret,
2597 struct netlink_ext_ack *extack)
ccbb0aa6
NA
2598{
2599 struct net_device *dev = NULL;
2600 u32 tblid = RT_TABLE_DEFAULT;
2601 struct mr_table *mrt;
2602 struct nlattr *attr;
2603 struct rtmsg *rtm;
2604 int ret, rem;
2605
fceb6435 2606 ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy,
c21ef3e3 2607 extack);
ccbb0aa6
NA
2608 if (ret < 0)
2609 goto out;
2610 rtm = nlmsg_data(nlh);
2611
2612 ret = -EINVAL;
2613 if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
2614 rtm->rtm_type != RTN_MULTICAST ||
2615 rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
2616 !ipmr_rtm_validate_proto(rtm->rtm_protocol))
2617 goto out;
2618
2619 memset(mfcc, 0, sizeof(*mfcc));
2620 mfcc->mfcc_parent = -1;
2621 ret = 0;
2622 nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
2623 switch (nla_type(attr)) {
2624 case RTA_SRC:
2625 mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
2626 break;
2627 case RTA_DST:
2628 mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
2629 break;
2630 case RTA_IIF:
2631 dev = __dev_get_by_index(net, nla_get_u32(attr));
2632 if (!dev) {
2633 ret = -ENODEV;
2634 goto out;
2635 }
2636 break;
2637 case RTA_MULTIPATH:
2638 if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
2639 ret = -EINVAL;
2640 goto out;
2641 }
2642 break;
2643 case RTA_PREFSRC:
2644 ret = 1;
2645 break;
2646 case RTA_TABLE:
2647 tblid = nla_get_u32(attr);
2648 break;
2649 }
2650 }
2651 mrt = ipmr_get_table(net, tblid);
2652 if (!mrt) {
2653 ret = -ENOENT;
2654 goto out;
2655 }
2656 *mrtret = mrt;
2657 *mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
2658 if (dev)
2659 mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
2660
2661out:
2662 return ret;
2663}
2664
2665/* takes care of both newroute and delroute */
c21ef3e3
DA
2666static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
2667 struct netlink_ext_ack *extack)
ccbb0aa6
NA
2668{
2669 struct net *net = sock_net(skb->sk);
2670 int ret, mrtsock, parent;
2671 struct mr_table *tbl;
2672 struct mfcctl mfcc;
2673
2674 mrtsock = 0;
2675 tbl = NULL;
c21ef3e3 2676 ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
ccbb0aa6
NA
2677 if (ret < 0)
2678 return ret;
2679
2680 parent = ret ? mfcc.mfcc_parent : -1;
2681 if (nlh->nlmsg_type == RTM_NEWROUTE)
2682 return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
2683 else
2684 return ipmr_mfc_delete(tbl, &mfcc, parent);
2685}
2686
772c344d
NA
2687static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
2688{
2689 u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
2690
2691 if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
2692 nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
2693 nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
2694 mrt->mroute_reg_vif_num) ||
2695 nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
2696 mrt->mroute_do_assert) ||
c921c207
NA
2697 nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
2698 nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
2699 mrt->mroute_do_wrvifwhole))
772c344d
NA
2700 return false;
2701
2702 return true;
2703}
2704
2705static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
2706{
2707 struct nlattr *vif_nest;
2708 struct vif_device *vif;
2709
2710 /* if the VIF doesn't exist just continue */
2711 if (!VIF_EXISTS(mrt, vifid))
2712 return true;
2713
2714 vif = &mrt->vif_table[vifid];
2715 vif_nest = nla_nest_start(skb, IPMRA_VIF);
2716 if (!vif_nest)
2717 return false;
2718 if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
2719 nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
2720 nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
2721 nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
2722 IPMRA_VIFA_PAD) ||
2723 nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
2724 IPMRA_VIFA_PAD) ||
2725 nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
2726 IPMRA_VIFA_PAD) ||
2727 nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
2728 IPMRA_VIFA_PAD) ||
2729 nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
2730 nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
2731 nla_nest_cancel(skb, vif_nest);
2732 return false;
2733 }
2734 nla_nest_end(skb, vif_nest);
2735
2736 return true;
2737}
2738
14fc5bb2
DA
2739static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
2740 struct netlink_ext_ack *extack)
2741{
2742 struct ifinfomsg *ifm;
2743
2744 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2745 NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump");
2746 return -EINVAL;
2747 }
2748
2749 if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
2750 NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump");
2751 return -EINVAL;
2752 }
2753
2754 ifm = nlmsg_data(nlh);
2755 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2756 ifm->ifi_change || ifm->ifi_index) {
2757 NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request");
2758 return -EINVAL;
2759 }
2760
2761 return 0;
2762}
2763
772c344d
NA
2764static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
2765{
2766 struct net *net = sock_net(skb->sk);
2767 struct nlmsghdr *nlh = NULL;
2768 unsigned int t = 0, s_t;
2769 unsigned int e = 0, s_e;
2770 struct mr_table *mrt;
2771
14fc5bb2
DA
2772 if (cb->strict_check) {
2773 int err = ipmr_valid_dumplink(cb->nlh, cb->extack);
2774
2775 if (err < 0)
2776 return err;
2777 }
2778
772c344d
NA
2779 s_t = cb->args[0];
2780 s_e = cb->args[1];
2781
2782 ipmr_for_each_table(mrt, net) {
2783 struct nlattr *vifs, *af;
2784 struct ifinfomsg *hdr;
2785 u32 i;
2786
2787 if (t < s_t)
2788 goto skip_table;
2789 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
2790 cb->nlh->nlmsg_seq, RTM_NEWLINK,
2791 sizeof(*hdr), NLM_F_MULTI);
2792 if (!nlh)
2793 break;
2794
2795 hdr = nlmsg_data(nlh);
2796 memset(hdr, 0, sizeof(*hdr));
2797 hdr->ifi_family = RTNL_FAMILY_IPMR;
2798
2799 af = nla_nest_start(skb, IFLA_AF_SPEC);
2800 if (!af) {
2801 nlmsg_cancel(skb, nlh);
2802 goto out;
2803 }
2804
2805 if (!ipmr_fill_table(mrt, skb)) {
2806 nlmsg_cancel(skb, nlh);
2807 goto out;
2808 }
2809
2810 vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
2811 if (!vifs) {
2812 nla_nest_end(skb, af);
2813 nlmsg_end(skb, nlh);
2814 goto out;
2815 }
2816 for (i = 0; i < mrt->maxvif; i++) {
2817 if (e < s_e)
2818 goto skip_entry;
2819 if (!ipmr_fill_vif(mrt, i, skb)) {
2820 nla_nest_end(skb, vifs);
2821 nla_nest_end(skb, af);
2822 nlmsg_end(skb, nlh);
2823 goto out;
2824 }
2825skip_entry:
2826 e++;
2827 }
2828 s_e = 0;
2829 e = 0;
2830 nla_nest_end(skb, vifs);
2831 nla_nest_end(skb, af);
2832 nlmsg_end(skb, nlh);
2833skip_table:
2834 t++;
2835 }
2836
2837out:
2838 cb->args[1] = e;
2839 cb->args[0] = t;
2840
2841 return skb->len;
2842}
2843
e905a9ed 2844#ifdef CONFIG_PROC_FS
7ef8f65d
NA
 2845/* The /proc interfaces to multicast routing:
2846 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
1da177e4 2847 */
1da177e4
LT
2848
2849static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
ba93ef74 2850 __acquires(mrt_lock)
1da177e4 2851{
3feda6b4 2852 struct mr_vif_iter *iter = seq->private;
f6bb4514 2853 struct net *net = seq_file_net(seq);
f0ad0860
PM
2854 struct mr_table *mrt;
2855
2856 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
51456b29 2857 if (!mrt)
f0ad0860
PM
2858 return ERR_PTR(-ENOENT);
2859
2860 iter->mrt = mrt;
f6bb4514 2861
1da177e4 2862 read_lock(&mrt_lock);
3feda6b4 2863 return mr_vif_seq_start(seq, pos);
1da177e4
LT
2864}
2865
2866static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
ba93ef74 2867 __releases(mrt_lock)
1da177e4
LT
2868{
2869 read_unlock(&mrt_lock);
2870}
2871
2872static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2873{
3feda6b4 2874 struct mr_vif_iter *iter = seq->private;
f0ad0860 2875 struct mr_table *mrt = iter->mrt;
f6bb4514 2876
1da177e4 2877 if (v == SEQ_START_TOKEN) {
e905a9ed 2878 seq_puts(seq,
1da177e4
LT
2879 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
2880 } else {
2881 const struct vif_device *vif = v;
6853f21f
YM
2882 const char *name = vif->dev ?
2883 vif->dev->name : "none";
1da177e4
LT
2884
2885 seq_printf(seq,
91e6dd82 2886 "%2td %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
0c12295a 2887 vif - mrt->vif_table,
e905a9ed 2888 name, vif->bytes_in, vif->pkt_in,
1da177e4
LT
2889 vif->bytes_out, vif->pkt_out,
2890 vif->flags, vif->local, vif->remote);
2891 }
2892 return 0;
2893}
2894
f690808e 2895static const struct seq_operations ipmr_vif_seq_ops = {
1da177e4 2896 .start = ipmr_vif_seq_start,
3feda6b4 2897 .next = mr_vif_seq_next,
1da177e4
LT
2898 .stop = ipmr_vif_seq_stop,
2899 .show = ipmr_vif_seq_show,
2900};
2901
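For reference, ipmr_vif_seq_show() renders /proc/net/ip_mr_vif roughly as below. The counter values are illustrative; Local/Remote are raw hex addresses (0100000A is 10.0.0.1 as stored), and flag 00004 is VIFF_REGISTER for the pimreg device:

	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
	 0 eth0          123456     789    654321     456 00000 0100000A 00000000
	 1 pimreg             0       0         0       0 00004 00000000 00000000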
1da177e4
LT
2902static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2903{
f6bb4514 2904 struct net *net = seq_file_net(seq);
f0ad0860 2905 struct mr_table *mrt;
f6bb4514 2906
f0ad0860 2907 mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
51456b29 2908 if (!mrt)
f0ad0860 2909 return ERR_PTR(-ENOENT);
f6bb4514 2910
c8d61968 2911 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
1da177e4
LT
2912}
2913
2914static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
2915{
2916 int n;
2917
2918 if (v == SEQ_START_TOKEN) {
e905a9ed 2919 seq_puts(seq,
1da177e4
LT
2920 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
2921 } else {
2922 const struct mfc_cache *mfc = v;
c8d61968 2923 const struct mr_mfc_iter *it = seq->private;
f0ad0860 2924 const struct mr_table *mrt = it->mrt;
e905a9ed 2925
0eae88f3
ED
2926 seq_printf(seq, "%08X %08X %-3hd",
2927 (__force u32) mfc->mfc_mcastgrp,
2928 (__force u32) mfc->mfc_origin,
494fff56 2929 mfc->_c.mfc_parent);
1da177e4 2930
0c12295a 2931 if (it->cache != &mrt->mfc_unres_queue) {
1ea472e2 2932 seq_printf(seq, " %8lu %8lu %8lu",
494fff56
YM
2933 mfc->_c.mfc_un.res.pkt,
2934 mfc->_c.mfc_un.res.bytes,
2935 mfc->_c.mfc_un.res.wrong_if);
2936 for (n = mfc->_c.mfc_un.res.minvif;
2937 n < mfc->_c.mfc_un.res.maxvif; n++) {
0c12295a 2938 if (VIF_EXISTS(mrt, n) &&
494fff56 2939 mfc->_c.mfc_un.res.ttls[n] < 255)
cf958ae3 2940 seq_printf(seq,
e905a9ed 2941 " %2d:%-3d",
494fff56 2942 n, mfc->_c.mfc_un.res.ttls[n]);
1da177e4 2943 }
1ea472e2
BT
2944 } else {
2945 /* unresolved mfc_caches don't contain
2946 * pkt, bytes and wrong_if values
2947 */
2948 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
1da177e4
LT
2949 }
2950 seq_putc(seq, '\n');
2951 }
2952 return 0;
2953}
2954
f690808e 2955static const struct seq_operations ipmr_mfc_seq_ops = {
1da177e4 2956 .start = ipmr_mfc_seq_start,
c8d61968
YM
2957 .next = mr_mfc_seq_next,
2958 .stop = mr_mfc_seq_stop,
1da177e4
LT
2959 .show = ipmr_mfc_seq_show,
2960};
e905a9ed 2961#endif
1da177e4
LT
2962
2963#ifdef CONFIG_IP_PIMSM_V2
32613090 2964static const struct net_protocol pim_protocol = {
1da177e4 2965 .handler = pim_rcv,
403dbb97 2966 .netns_ok = 1,
1da177e4
LT
2967};
2968#endif
2969
4d65b948
YG
2970static unsigned int ipmr_seq_read(struct net *net)
2971{
2972 ASSERT_RTNL();
2973
2974 return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
2975}
2976
2977static int ipmr_dump(struct net *net, struct notifier_block *nb)
2978{
cdc9f944
YM
2979 return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
2980 ipmr_mr_table_iter, &mrt_lock);
4d65b948
YG
2981}
2982
2983static const struct fib_notifier_ops ipmr_notifier_ops_template = {
2984 .family = RTNL_FAMILY_IPMR,
2985 .fib_seq_read = ipmr_seq_read,
2986 .fib_dump = ipmr_dump,
2987 .owner = THIS_MODULE,
2988};
2989
ef739d8a 2990static int __net_init ipmr_notifier_init(struct net *net)
4d65b948
YG
2991{
2992 struct fib_notifier_ops *ops;
2993
2994 net->ipv4.ipmr_seq = 0;
2995
2996 ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
2997 if (IS_ERR(ops))
2998 return PTR_ERR(ops);
2999 net->ipv4.ipmr_notifier_ops = ops;
3000
3001 return 0;
3002}
3003
3004static void __net_exit ipmr_notifier_exit(struct net *net)
3005{
3006 fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
3007 net->ipv4.ipmr_notifier_ops = NULL;
3008}
3009
7ef8f65d 3010/* Setup for IP multicast routing */
cf958ae3
BT
3011static int __net_init ipmr_net_init(struct net *net)
3012{
f0ad0860 3013 int err;
cf958ae3 3014
4d65b948
YG
3015 err = ipmr_notifier_init(net);
3016 if (err)
3017 goto ipmr_notifier_fail;
3018
f0ad0860
PM
3019 err = ipmr_rules_init(net);
3020 if (err < 0)
4d65b948 3021 goto ipmr_rules_fail;
f6bb4514
BT
3022
3023#ifdef CONFIG_PROC_FS
3024 err = -ENOMEM;
c3506372
CH
3025 if (!proc_create_net("ip_mr_vif", 0, net->proc_net, &ipmr_vif_seq_ops,
3026 sizeof(struct mr_vif_iter)))
f6bb4514 3027 goto proc_vif_fail;
c3506372
CH
3028 if (!proc_create_net("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
3029 sizeof(struct mr_mfc_iter)))
f6bb4514
BT
3030 goto proc_cache_fail;
3031#endif
2bb8b26c
BT
3032 return 0;
3033
f6bb4514
BT
3034#ifdef CONFIG_PROC_FS
3035proc_cache_fail:
ece31ffd 3036 remove_proc_entry("ip_mr_vif", net->proc_net);
f6bb4514 3037proc_vif_fail:
f0ad0860 3038 ipmr_rules_exit(net);
f6bb4514 3039#endif
4d65b948
YG
3040ipmr_rules_fail:
3041 ipmr_notifier_exit(net);
3042ipmr_notifier_fail:
cf958ae3
BT
3043 return err;
3044}
3045
3046static void __net_exit ipmr_net_exit(struct net *net)
3047{
f6bb4514 3048#ifdef CONFIG_PROC_FS
ece31ffd
G
3049 remove_proc_entry("ip_mr_cache", net->proc_net);
3050 remove_proc_entry("ip_mr_vif", net->proc_net);
f6bb4514 3051#endif
4d65b948 3052 ipmr_notifier_exit(net);
f0ad0860 3053 ipmr_rules_exit(net);
cf958ae3
BT
3054}
3055
3056static struct pernet_operations ipmr_net_ops = {
3057 .init = ipmr_net_init,
3058 .exit = ipmr_net_exit,
3059};
e905a9ed 3060
03d2f897 3061int __init ip_mr_init(void)
1da177e4 3062{
03d2f897
WC
3063 int err;
3064
1da177e4
LT
3065 mrt_cachep = kmem_cache_create("ip_mrt_cache",
3066 sizeof(struct mfc_cache),
a8c9486b 3067 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
20c2df83 3068 NULL);
03d2f897 3069
cf958ae3
BT
3070 err = register_pernet_subsys(&ipmr_net_ops);
3071 if (err)
3072 goto reg_pernet_fail;
3073
03d2f897
WC
3074 err = register_netdevice_notifier(&ip_mr_notifier);
3075 if (err)
3076 goto reg_notif_fail;
403dbb97
TG
3077#ifdef CONFIG_IP_PIMSM_V2
3078 if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
058bd4d2 3079 pr_err("%s: can't add PIM protocol\n", __func__);
403dbb97
TG
3080 err = -EAGAIN;
3081 goto add_proto_fail;
3082 }
3083#endif
c7ac8679 3084 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
b97bac64 3085 ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
ccbb0aa6 3086 rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
b97bac64 3087 ipmr_rtm_route, NULL, 0);
ccbb0aa6 3088 rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
b97bac64 3089 ipmr_rtm_route, NULL, 0);
772c344d
NA
3090
3091 rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
b97bac64 3092 NULL, ipmr_rtm_dumplink, 0);
03d2f897 3093 return 0;
f6bb4514 3094
403dbb97
TG
3095#ifdef CONFIG_IP_PIMSM_V2
3096add_proto_fail:
3097 unregister_netdevice_notifier(&ip_mr_notifier);
3098#endif
c3e38896 3099reg_notif_fail:
cf958ae3
BT
3100 unregister_pernet_subsys(&ipmr_net_ops);
3101reg_pernet_fail:
c3e38896 3102 kmem_cache_destroy(mrt_cachep);
03d2f897 3103 return err;
1da177e4 3104}