/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>

struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}

static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ipmr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};

static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}
#endif

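/* Resolved MFC entries are kept in an rhltable keyed by (mcastgrp, origin).
 * A list-valued hash table is used because several entries may share the
 * same key and differ only in their parent vif (see the *_parent lookup
 * helpers further down).
 */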
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	struct mfc_cache *c = (struct mfc_cache *)ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mfc_cache, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.nelem_hint = 3,
	.locks_mul = 1,
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
};

static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);
	mrt->id = id;

	rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}

static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

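/* The pimreg device is a pure upcall device: every packet transmitted
 * through it is reported to the user space daemon as an IGMPMSG_WHOLEPKT
 * message and then dropped; the device itself never forwards anything.
 */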
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}

static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif

/**
 *	vif_delete - Delete a VIF entry
 *	@notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

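/* MFC entries are freed after an RCU grace period, so lockless readers on
 * the forwarding path never see an entry disappear underneath them.
 */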
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
EXPORT_SYMBOL(ipmr_cache_free);

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}

/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}

static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
				    dev->ifindex, &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		return c;

	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = htonl(INADDR_ANY),
			.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = ipmr_cache_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{
	struct mfc_cache_cmp_arg arg = {
			.mfc_mcastgrp = mcastgrp,
			.mfc_origin = origin,
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}

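/* Resolved entries carry a reference count; it starts at 1 for the table's
 * own reference, and ipmr_cache_put() (defined outside this section) is
 * expected to drop it and call ipmr_cache_free() once it reaches zero.
 */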
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->mfc_un.res.minvif = MAXVIFS;
		refcount_set(&c->mfc_un.res.refcount, 1);
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}

/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
		}
	}
}

/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	igmpmsg_netlink_event(mrt, skb);

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}

/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	struct mfc_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
	list_del_rcu(&c->list);
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
	ipmr_cache_put(c);

	return 0;
}

static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	struct mfc_cache *uc, *c;
	bool found;
	int ret;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode,
				  ipmr_rht_params);
	if (ret) {
		pr_err("ipmr: rhtable insert error %d\n", ret);
		ipmr_cache_free(c);
		return ret;
	}
	list_add_tail_rcu(&c->list, &mrt->mfc_cache_list);
	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}

/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	struct mfc_cache *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
			continue;
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
		list_del_rcu(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_cache_put(c);
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	ASSERT_RTNL();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
		}
	}
}

/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
						    NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			ret = ip_ra_control(sk, 0, NULL);
			goto out_unlock;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
	return ret;
}

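/* For reference, a user space routing daemon claims the mroute socket
 * roughly like this (a sketch only; a real daemon such as mrouted or pimd
 * also adds VIFs/MFC entries and reads IGMPMSG upcalls from the socket):
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	...
 *	setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);
 */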
/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif

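/* Tear down any VIF bound to a network device that is being unregistered */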
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};

/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}

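/* NF_HOOK okfn: count the forwarded packet, handle IP options and hand
 * the skb to dst_output() for transmission.
 */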
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}


/* Processing handlers for ipmr_forward */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	struct flowi4 fl4;
	int encap = 0;

	if (!vif->dev)
		goto out_free;

	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
		goto out_free;
	}

	if (vif->flags & VIFF_TUNNEL) {
		rt = ip_route_output_ports(net, &fl4, NULL,
					   vif->remote, vif->local,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
					   0, 0,
					   IPPROTO_IPIP,
					   RT_TOS(iph->tos), vif->link);
		if (IS_ERR(rt))
			goto out_free;
	}

	dev = rt->dst.dev;

	if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		 * allow sending ICMP here, so the packets will
		 * silently disappear into a black hole.
		 */
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out += skb->len;

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR
	 */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(net, skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		vif->dev->stats.tx_packets++;
		vif->dev->stats.tx_bytes += skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces: if the mrouter runs a multicast program,
	 * it should receive packets regardless of which interface the
	 * program has joined on. Otherwise the program would have to join
	 * on all interfaces, while a multihoming host (or a router that is
	 * not an mrouter) cannot join on more than one interface without
	 * receiving duplicate packets.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}
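
/* Editor's example (not part of the original file): the IGMPMSG_WHOLEPKT
 * report queued above by ipmr_cache_report() reaches the routing daemon as
 * a read() on its mroute control socket, with a struct igmpmsg overlaid on
 * the IP header. A minimal sketch, assuming the uapi <linux/mroute.h>
 * layout; pim_register_send() is a hypothetical daemon helper:
 *
 *	unsigned char buf[2048];
 *	struct igmpmsg *msg = (struct igmpmsg *)buf;
 *	ssize_t n = read(mroute_fd, buf, sizeof(buf));
 *
 *	if (n >= (ssize_t)sizeof(*msg) && msg->im_mbz == 0 &&
 *	    msg->im_msgtype == IGMPMSG_WHOLEPKT)
 *		pim_register_send(msg, n);
 */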

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct net_device *dev, struct sk_buff *skb,
			  struct mfc_cache *cache, int local)
{
	int true_vifi = ipmr_find_vif(mrt, dev);
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != dev) {
		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until the routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for (S,G)
			 * entries whose default multicast route points to
			 * the wrong oif. In any case, it is not a good idea
			 * to run multicast applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * It is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/* Forward the frame */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
		    ip_hdr(skb)->ttl >
				cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}
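
/* Editor's note: the loop above applies the oif rule that MRT_ADD_MFC
 * installs via mfcc_ttls[]: a vif is an output for the entry only when its
 * threshold is below 255, and only packets whose IP TTL exceeds that
 * threshold are replicated to it. The same test in isolation (editor's
 * sketch, not in the original source):
 *
 *	static bool would_forward(const unsigned char *ttls, int vif, u8 ttl)
 *	{
 *		return ttls[vif] < 255 && ttl > ttls[vif];
 *	}
 */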

static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}
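
/* Editor's note: with CONFIG_IP_MROUTE_MULTIPLE_TABLES, the fib-rules
 * lookup above can steer packets to per-daemon tables. A hedged userspace
 * sketch of claiming a non-default table; MRT_TABLE has to be set before
 * MRT_INIT, otherwise the kernel refuses it:
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	unsigned int table = 100;
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_TABLE, &table, sizeof(table));
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 */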

/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;
	struct net_device *dev;

	/* skb->dev passed in is the loX master dev for vrfs.
	 * As there are no vifs associated with loopback devices,
	 * get the proper interface that does have a vif associated with it.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	/* Packet is looped back after forward, it should not be
	 * forwarded a second time, but it still can be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option in IGMP packets destined to routable
			 * groups. It is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, dev, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
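
/* Editor's example (illustrative, not part of the original source):
 * ip_mr_input() forwards only after a daemon has opened the mroute socket
 * and installed vifs and MFC entries. A minimal control-path sketch with
 * placeholder addresses, error handling elided, assuming a second vif (1)
 * is added the same way as vif 0:
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *	struct mfcctl mc = { .mfcc_parent = 0 };
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *	mc.mfcc_origin.s_addr = inet_addr("198.51.100.9");
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.1.1");
 *	memset(mc.mfcc_ttls, 255, sizeof(mc.mfcc_ttls));
 *	mc.mfcc_ttls[1] = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 */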

#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
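
/* Editor's note: pim_rcv_v1() above bails out unless mroute_do_pim is set
 * for the table. A daemon enables it (and the WRONGVIF asserts used by
 * ip_mr_forward()) through socket options on the mroute socket; a hedged
 * sketch:
 *
 *	int do_pim = 1, do_assert = 1;
 *
 *	setsockopt(mroute_fd, IPPROTO_IP, MRT_PIM, &do_pim, sizeof(do_pim));
 *	setsockopt(mroute_fd, IPPROTO_IP, MRT_ASSERT, &do_assert,
 *		   sizeof(do_assert));
 */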

static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
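
/* Editor's example: __ipmr_fill_mroute() encodes the oif set as a nested
 * RTA_MULTIPATH of bare struct rtnexthop entries, with the TTL threshold
 * in rtnh_hops. A hedged userspace sketch walking such an attribute with
 * the uapi rtnetlink macros, where mp points at the RTA_MULTIPATH rtattr:
 *
 *	struct rtnexthop *nh = RTA_DATA(mp);
 *	int len = RTA_PAYLOAD(mp);
 *
 *	while (RTNH_OK(nh, len)) {
 *		printf("oif %d threshold %u\n", nh->rtnh_ifindex,
 *		       nh->rtnh_hops);
 *		len -= RTNH_ALIGN(nh->rtnh_len);
 *		nh = RTNH_NEXT(nh);
 *	}
 */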

int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, u32 portid)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}

static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len = 32;
	rtm->rtm_src_len = 32;
	rtm->rtm_tos = 0;
	rtm->rtm_table = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4) /* RTA_TABLE */
		+ nla_total_size(4) /* RTA_SRC */
		+ nla_total_size(4) /* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4) /* RTA_IIF */
		      + nla_total_size(0) /* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
				 /* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}

static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}

static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1) /* IPMRA_CREPORT_MSGTYPE */
		+ nla_total_size(4) /* IPMRA_CREPORT_VIF_ID */
		+ nla_total_size(4) /* IPMRA_CREPORT_SRC_ADDR */
		+ nla_total_size(4) /* IPMRA_CREPORT_DST_ADDR */
				    /* IPMRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}

static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct igmpmsg);
	msg = (struct igmpmsg *)skb_network_header(pkt);

	skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
	if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
	    nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
			    msg->im_src.s_addr) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
			    msg->im_dst.s_addr))
		goto nla_put_failure;

	nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
}
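
/* Editor's example: the two events above are multicast to the
 * RTNLGRP_IPV4_MROUTE and RTNLGRP_IPV4_MROUTE_R netlink groups. A hedged
 * sketch of a monitor subscribing to the cache reports:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	int grp = RTNLGRP_IPV4_MROUTE_R;
 *
 *	setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
 *		   &grp, sizeof(grp));
 */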

static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX + 1];
	struct sk_buff *skb = NULL;
	struct mfc_cache *cache;
	struct mr_table *mrt;
	struct rtmsg *rtm;
	__be32 src, grp;
	u32 tableid;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
			  rtm_ipv4_policy, extack);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;

	mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
	if (!mrt) {
		err = -ENOENT;
		goto errout_free;
	}

	/* entries are added/deleted only under RTNL */
	rcu_read_lock();
	cache = ipmr_cache_find(mrt, src, grp);
	rcu_read_unlock();
	if (!cache) {
		err = -ENOENT;
		goto errout_free;
	}

	skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto errout_free;
	}

	err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
			       nlh->nlmsg_seq, cache,
			       RTM_NEWROUTE, 0);
	if (err < 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
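
/* Editor's sketch (hypothetical request layout, error handling elided):
 * a targeted (S,G) query for the handler above is an RTM_GETROUTE message
 * with rtm_family = RTNL_FAMILY_IPMR plus RTA_SRC/RTA_DST attributes;
 * add_rta() stands in for the caller's attribute-append helper:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtmsg rtm;
 *		char attrs[64];
 *	} req = {
 *		.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg)),
 *		.nlh.nlmsg_type = RTM_GETROUTE,
 *		.nlh.nlmsg_flags = NLM_F_REQUEST,
 *		.rtm.rtm_family = RTNL_FAMILY_IPMR,
 *	};
 *
 *	add_rta(&req.nlh, RTA_SRC, &src.s_addr, 4);
 *	add_rta(&req.nlh, RTA_DST, &grp.s_addr, 4);
 */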

static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_e = cb->args[1];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (e < s_e)
				goto next_entry;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0)
				goto done;
next_entry:
			e++;
		}
		e = 0;
		s_e = 0;

		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}

static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
};

static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
	switch (rtm_protocol) {
	case RTPROT_STATIC:
	case RTPROT_MROUTED:
		return true;
	}
	return false;
}

static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
			break;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return remaining > 0 ? -EINVAL : vifi;
}

/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret,
			    struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;
	struct rtmsg *rtm;
	int ret, rem;

	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy,
			     extack);
	if (ret < 0)
		goto out;
	rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_PREFSRC:
			ret = 1;
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}
	*mrtret = mrt;
	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	return ret;
}

/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}

static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
{
	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);

	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
			mrt->mroute_reg_vif_num) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
		       mrt->mroute_do_assert) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim))
		return false;

	return true;
}

static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
{
	struct nlattr *vif_nest;
	struct vif_device *vif;

	/* if the VIF doesn't exist just continue */
	if (!VIF_EXISTS(mrt, vifid))
		return true;

	vif = &mrt->vif_table[vifid];
	vif_nest = nla_nest_start(skb, IPMRA_VIF);
	if (!vif_nest)
		return false;
	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
		nla_nest_cancel(skb, vif_nest);
		return false;
	}
	nla_nest_end(skb, vif_nest);

	return true;
}

static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;
	struct mr_table *mrt;

	s_t = cb->args[0];
	s_e = cb->args[1];

	ipmr_for_each_table(mrt, net) {
		struct nlattr *vifs, *af;
		struct ifinfomsg *hdr;
		u32 i;

		if (t < s_t)
			goto skip_table;
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, RTM_NEWLINK,
				sizeof(*hdr), NLM_F_MULTI);
		if (!nlh)
			break;

		hdr = nlmsg_data(nlh);
		memset(hdr, 0, sizeof(*hdr));
		hdr->ifi_family = RTNL_FAMILY_IPMR;

		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		if (!ipmr_fill_table(mrt, skb)) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
		if (!vifs) {
			nla_nest_end(skb, af);
			nlmsg_end(skb, nlh);
			goto out;
		}
		for (i = 0; i < mrt->maxvif; i++) {
			if (e < s_e)
				goto skip_entry;
			if (!ipmr_fill_vif(mrt, i, skb)) {
				nla_nest_end(skb, vifs);
				nla_nest_end(skb, af);
				nlmsg_end(skb, nlh);
				goto out;
			}
skip_entry:
			e++;
		}
		s_e = 0;
		e = 0;
		nla_nest_end(skb, vifs);
		nla_nest_end(skb, af);
		nlmsg_end(skb, nlh);
skip_table:
		t++;
	}

out:
	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing:
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
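
/* Editor's note: given the format above, each /proc/net/ip_mr_vif row is
 * the vif index, the device name (or "none"), byte and packet counters in
 * both directions, the VIFF_* flags in hex, and the local/remote addresses
 * as raw hex in network byte order, matching the SEQ_START_TOKEN header.
 */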

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_list)
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
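
/* Editor's note: /proc/net/ip_mr_cache rows are therefore keyed by group
 * and origin in raw hex plus the parent vif; resolved entries append one
 * "vif:threshold" pair per oif, while unresolved entries print zeroed
 * packet/byte/wrong-if counters.
 */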

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
	.netns_ok = 1,
};
#endif

/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, 0);

	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
		      NULL, ipmr_rtm_dumplink, 0);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}