/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *	(c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
struct ipmr_rule {
	struct fib_rule		common;
};

struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to Alan's original scheme. The hash table of resolved
 * entries is changed only in process context and protected
 * with the weak lock mrt_lock. The queue of unresolved entries is
 * protected with the strong spinlock mfc_unres_lock.
 *
 * This way the data path is entirely free of exclusive locks.
 */

static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ipmr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
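/* Look up the multicast routing table to use for a given flow through the
 * fib-rules framework; on success the matching table is returned in *mrt.
 */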
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	int err;
	struct ipmr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi4_to_flowi(flp4));

	err = fib_rules_lookup(net->ipv4.mr_rules_ops,
			       flowi4_to_flowi(flp4), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
			    int flags, struct fib_lookup_arg *arg)
{
	struct ipmr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ipmr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
	return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
			       struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			     struct nlattr **tb)
{
	return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			  struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos = 0;
	return 0;
}

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.rule_size	= sizeof(struct ipmr_rule),
	.addr_size	= sizeof(u32),
	.action		= ipmr_rule_action,
	.match		= ipmr_rule_match,
	.configure	= ipmr_rule_configure,
	.compare	= ipmr_rule_compare,
	.fill		= ipmr_rule_fill,
	.nlgroup	= RTNLGRP_IPV4_RULE,
	.policy		= ipmr_rule_policy,
	.owner		= THIS_MODULE,
};
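/* Per-netns setup for the multiple-tables case: register the IPMR fib-rules
 * ops, create the default table and install a default rule pointing at it.
 */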
static int __net_init ipmr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ipmr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv4.mr_tables);

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
	if (err < 0)
		goto err2;

	net->ipv4.mr_rules_ops = ops;
	return 0;

err2:
	ipmr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
		list_del(&mrt->list);
		ipmr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv4.mr_rules_ops);
	rtnl_unlock();
}
#else
#define ipmr_for_each_table(mrt, net) \
	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
	return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
			   struct mr_table **mrt)
{
	*mrt = net->ipv4.mrt;
	return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv4.mrt = mrt;
	return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
	rtnl_lock();
	ipmr_free_table(net->ipv4.mrt);
	net->ipv4.mrt = NULL;
	rtnl_unlock();
}
#endif
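/* Resolved MFC entries are kept in an rhashtable keyed on the
 * (mcastgrp, origin) pair; the compare callback reports a mismatch if
 * either field differs.
 */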
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
				const void *ptr)
{
	const struct mfc_cache_cmp_arg *cmparg = arg->key;
	struct mfc_cache *c = (struct mfc_cache *)ptr;

	return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
	       cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
	.head_offset = offsetof(struct mfc_cache, mnode),
	.key_offset = offsetof(struct mfc_cache, cmparg),
	.key_len = sizeof(struct mfc_cache_cmp_arg),
	.nelem_hint = 3,
	.locks_mul = 1,
	.obj_cmpfn = ipmr_hash_cmp,
	.automatic_shrinking = true,
};
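/* Find the table with the given id, creating it (along with its MFC hash
 * and expiry timer) if it does not exist yet. Returns ERR_PTR() on failure.
 */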
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
	if (id != RT_TABLE_DEFAULT && id >= 1000000000)
		return ERR_PTR(-EINVAL);

	mrt = ipmr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	write_pnet(&mrt->net, net);
	mrt->id = id;

	rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
		    (unsigned long)mrt);

	mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
	return mrt;
}
static void ipmr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
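/* Tear down the "dvmrp%d" IPIP tunnel backing a VIFF_TUNNEL vif by issuing
 * SIOCDELTUNNEL against the tunl0 control device.
 */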
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *in_dev;

	ASSERT_RTNL();

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev)
		return false;
	ipv4_devconf_setall(in_dev);
	neigh_parms_data_state_setall(in_dev->arp_parms);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

	return true;
}
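/* Create the "dvmrp%d" IPIP tunnel for a VIFF_TUNNEL vif via a
 * SIOCADDTUNNEL ioctl on the tunl0 control device, then bring it up.
 */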
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(net, "tunl0");

	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		int err;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
			set_fs(oldfs);
		} else {
			err = -EOPNOTSUPP;
		}
		dev = NULL;

		if (err == 0 &&
		    (dev = __dev_get_by_name(net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;
			if (!ipmr_init_vif_indev(dev))
				goto failure;
			if (dev_open(dev))
				goto failure;
			dev_hold(dev);
		}
	}
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
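/* Transmit handler for the pimreg device: account the packet and bounce it
 * whole to the daemon as an IGMPMSG_WHOLEPKT upcall; nothing hits the wire.
 */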
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi4 fl4 = {
		.flowi4_oif	= dev->ifindex,
		.flowi4_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi4_mark	= skb->mark,
	};
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->destructor		= free_netdev;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
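/* Allocate and register the register vif device for a table ("pimreg" for
 * the default table, "pimreg%u" otherwise) and bring it up.
 */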
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT_TABLE_DEFAULT)
		sprintf(name, "pimreg");
	else
		sprintf(name, "pimreg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);

	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (!ipmr_init_vif_indev(dev))
		goto failure;
	if (dev_open(dev))
		goto failure;

	dev_hold(dev);

	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}

/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
	return NULL;
}
#endif
/**
 * vif_delete - Delete a VIF entry
 * @notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev),
					    NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
	struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

	kmem_cache_free(mrt_cachep, c);
}

static inline void ipmr_cache_free(struct mfc_cache *c)
{
	call_rcu(&c->rcu, ipmr_cache_free_rcu);
}

/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = nlmsg_data(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			kfree_skb(skb);
		}
	}

	ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */
static void ipmr_expire_process(unsigned long arg)
{
	struct mr_table *mrt = (struct mr_table *)arg;
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, *next;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (list_empty(&mrt->mfc_unres_queue))
		goto out;

	now = jiffies;
	expires = 10*HZ;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_destroy_unres(mrt, c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
				   unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
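/* Allocate and wire up a new vif: a PIM register device, a DVMRP tunnel,
 * or an existing interface chosen by address or ifindex. Called under RTNL.
 */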
static int vif_add(struct net *net, struct mr_table *mrt,
		   struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
	case VIFF_REGISTER:
		if (!ipmr_pimsm_enabled())
			return -EINVAL;
		/* Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(net, vifc);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			ipmr_del_tunnel(dev, vifc);
			dev_put(dev);
			return err;
		}
		break;
	case VIFF_USE_IFINDEX:
	case 0:
		if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
			dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
			if (dev && !__in_dev_get_rtnl(dev)) {
				dev_put(dev);
				return -EADDRNOTAVAIL;
			}
		} else {
			dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
		}
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in_dev = __in_dev_get_rtnl(dev);
	if (!in_dev) {
		dev_put(dev);
		return -EADDRNOTAVAIL;
	}
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING, dev->ifindex,
				    &in_dev->cnf);
	ip_rt_multicast_event(in_dev);

	/* Fill in the VIF structures */

	v->rate_limit = vifc->vifc_rate_limit;
	v->local = vifc->vifc_lcl_addr.s_addr;
	v->remote = vifc->vifc_rmt_addr.s_addr;
	v->flags = vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
		v->link = dev_get_iflink(dev);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	if (v->flags & VIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
	if (vifi+1 > mrt->maxvif)
		mrt->maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
					 __be32 origin,
					 __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		return c;

	return NULL;
}

/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
						    int vifi)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = htonl(INADDR_ANY),
		.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}

/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
					     __be32 mcastgrp, int vifi)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = htonl(INADDR_ANY)
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c, *proxy;

	if (mcastgrp == htonl(INADDR_ANY))
		goto skip;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = ipmr_cache_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

skip:
	return ipmr_cache_find_any_parent(mrt, vifi);
}

/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
						__be32 origin, __be32 mcastgrp,
						int parent)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin,
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

	if (c) {
		c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
		c->mfc_un.res.minvif = MAXVIFS;
	}
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

	if (c) {
		skb_queue_head_init(&c->mfc_un.unres.unresolved);
		c->mfc_un.unres.expires = jiffies + 10*HZ;
	}
	return c;
}
/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
			       struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/* Play the pending entries through our router */
	while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) -
						 (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = nlmsg_data(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else {
			ip_mr_forward(net, mrt, skb, c, 0);
		}
	}
}
/* Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 * expects the following bizarre scheme.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert)
{
	const int ihl = ip_hdrlen(pkt);
	struct sock *mroute_sk;
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	int ret;

	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		 * Duplicate old header, fix ihl, length etc.
		 * And all this only to mangle msg->im_msgtype and
		 * to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = mrt->mroute_reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else {
		/* Copy the IP header */
		skb_set_network_header(skb, skb->len);
		skb_put(skb, ihl);
		skb_copy_to_linear_data(skb, pkt->data, ihl);
		/* Flag to the kernel this is a route add */
		ip_hdr(skb)->protocol = 0;
		msg = (struct igmpmsg *)skb_network_header(skb);
		msg->im_vif = vifi;
		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		/* Add our header */
		igmp = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
		igmp->type = assert;
		msg->im_msgtype = assert;
		igmp->code = 0;
		ip_hdr(skb)->tot_len = htons(skb->len);	/* Fix the length */
		skb->transport_header = skb->network_header;
	}

	rcu_read_lock();
	mroute_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Deliver to mrouted */
	ret = sock_queue_rcv_skb(mroute_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
/* Queue a packet for resolution. It gets a locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
				 struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct mfc_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr) {
			found = true;
			break;
		}
	}

	if (!found) {
		/* Create a new entry if allowable */
		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/* Reflect first query at mrouted. */
		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			 * out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->list, &mrt->mfc_unres_queue);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);

		if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
			mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/* See if we can append the packet */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
	struct mfc_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
	list_del_rcu(&c->list);
	mroute_netlink_event(mrt, c, RTM_DELROUTE);
	ipmr_cache_free(c);

	return 0;
}
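/* Add or update an MFC entry. If a matching unresolved entry was queued,
 * replay its pending skbs through the new route. Called under RTNL.
 */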
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
			struct mfcctl *mfc, int mrtsock, int parent)
{
	struct mfc_cache *uc, *c;
	bool found;
	int ret;

	if (mfc->mfcc_parent >= MAXVIFS)
		return -ENFILE;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
				   mfc->mfcc_mcastgrp.s_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		mroute_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
	    !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c = ipmr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mfc_origin = mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent = mfc->mfcc_parent;
	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode,
				  ipmr_rht_params);
	if (ret) {
		pr_err("ipmr: rhtable insert error %d\n", ret);
		ipmr_cache_free(c);
		return ret;
	}
	list_add_tail_rcu(&c->list, &mrt->mfc_cache_list);
	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			list_del(&uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ipmr_cache_resolve(net, mrt, uc, c);
		ipmr_cache_free(uc);
	}
	mroute_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	struct mfc_cache *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		vif_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
			continue;
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
		list_del_rcu(&c->list);
		mroute_netlink_event(mrt, c, RTM_DELROUTE);
		ipmr_cache_free(c);
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			mroute_netlink_event(mrt, c, RTM_DELROUTE);
			ipmr_destroy_unres(mrt, c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	ASSERT_RTNL();
	ipmr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			mroute_clean_tables(mrt, false);
		}
	}
}
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */
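/* A minimal sketch of the sequence a routing daemon is expected to follow
 * (illustrative only; error handling omitted):
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, MRT_INIT, &on, sizeof(on));
 *	... MRT_ADD_VIF and MRT_ADD_MFC calls ...
 *	setsockopt(fd, IPPROTO_IP, MRT_DONE, NULL, 0);
 *
 * Only a raw IGMP socket is accepted, and everything except MRT_INIT
 * requires the caller to be the mroute socket or to have CAP_NET_ADMIN.
 */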
int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
			 unsigned int optlen)
{
	struct net *net = sock_net(sk);
	int val, ret = 0, parent = 0;
	struct mr_table *mrt;
	struct vifctl vif;
	struct mfcctl mfc;
	u32 uval;

	/* There's one exception to the lock - MRT_DONE which needs to unlock */
	rtnl_lock();
	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP) {
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt) {
		ret = -ENOENT;
		goto out_unlock;
	}
	if (optname != MRT_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			ret = -EACCES;
			goto out_unlock;
		}
	}

	switch (optname) {
	case MRT_INIT:
		if (optlen != sizeof(int)) {
			ret = -EINVAL;
			break;
		}
		if (rtnl_dereference(mrt->mroute_sk)) {
			ret = -EADDRINUSE;
			break;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			rcu_assign_pointer(mrt->mroute_sk, sk);
			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
			inet_netconf_notify_devconf(net, NETCONFA_MC_FORWARDING,
						    NETCONFA_IFINDEX_ALL,
						    net->ipv4.devconf_all);
		}
		break;
	case MRT_DONE:
		if (sk != rcu_access_pointer(mrt->mroute_sk)) {
			ret = -EACCES;
		} else {
			ret = ip_ra_control(sk, 0, NULL);
			goto out_unlock;
		}
		break;
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen != sizeof(vif)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&vif, optval, sizeof(vif))) {
			ret = -EFAULT;
			break;
		}
		if (vif.vifc_vifi >= MAXVIFS) {
			ret = -ENFILE;
			break;
		}
		if (optname == MRT_ADD_VIF) {
			ret = vif_add(net, mrt, &vif,
				      sk == rtnl_dereference(mrt->mroute_sk));
		} else {
			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
		}
		break;
	/* Manipulate the forwarding caches. These live
	 * in a sort of kernel/user symbiosis.
	 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		parent = -1;
	case MRT_ADD_MFC_PROXY:
	case MRT_DEL_MFC_PROXY:
		if (optlen != sizeof(mfc)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(&mfc, optval, sizeof(mfc))) {
			ret = -EFAULT;
			break;
		}
		if (parent == 0)
			parent = mfc.mfcc_parent;
		if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
			ret = ipmr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ipmr_mfc_add(net, mrt, &mfc,
					   sk == rtnl_dereference(mrt->mroute_sk),
					   parent);
		break;
	/* Control PIM assert. */
	case MRT_ASSERT:
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}
		mrt->mroute_do_assert = val;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled()) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(val)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(val, (int __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		val = !!val;
		if (val != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = val;
			mrt->mroute_do_assert = val;
		}
		break;
	case MRT_TABLE:
		if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
			ret = -ENOPROTOOPT;
			break;
		}
		if (optlen != sizeof(uval)) {
			ret = -EINVAL;
			break;
		}
		if (get_user(uval, (u32 __user *)optval)) {
			ret = -EFAULT;
			break;
		}

		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			ret = -EBUSY;
		} else {
			mrt = ipmr_new_table(net, uval);
			if (IS_ERR(mrt))
				ret = PTR_ERR(mrt);
			else
				raw_sk(sk)->ipmr_table = uval;
		}
		break;
	/* Spurious command, or MRT_VERSION which you cannot set. */
	default:
		ret = -ENOPROTOOPT;
	}
out_unlock:
	rtnl_unlock();
	return ret;
}
/* Getsockopt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_IGMP)
		return -EOPNOTSUPP;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT_VERSION:
		val = 0x0305;
		break;
	case MRT_PIM:
		if (!ipmr_pimsm_enabled())
			return -ENOPROTOOPT;
		val = mrt->mroute_do_pim;
		break;
	case MRT_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;
	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;
	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
	struct in_addr src;
	struct in_addr grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
	vifi_t	vifi;		/* Which iface */
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req sr;
	struct compat_sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.vifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.vifi];
		if (VIF_EXISTS(mrt, vr.vifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
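/* Netdevice notifier: when an interface goes away, delete every vif that
 * references it, in all tables.
 */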
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ipmr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				vif_delete(mrt, ct, 1, NULL);
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
	.notifier_call = ipmr_device_event,
};
/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
		     __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	const struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(net, skb, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
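/* Final step of forwarding: bump the forwarding counters, replay any IP
 * options, and hand the skb to dst_output().
 */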
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}
/* Processing handlers for ipmr_forward */
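/* Transmit one copy of the packet on the given vif: route it out (via the
 * tunnel endpoint for VIFF_TUNNEL vifs), decrement the TTL, and pass it
 * through the NF_INET_FORWARD hook.
 */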
0c12295a
PM
1719static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1720 struct sk_buff *skb, struct mfc_cache *c, int vifi)
1da177e4 1721{
eddc9ec5 1722 const struct iphdr *iph = ip_hdr(skb);
0c12295a 1723 struct vif_device *vif = &mrt->vif_table[vifi];
1da177e4
LT
1724 struct net_device *dev;
1725 struct rtable *rt;
31e4543d 1726 struct flowi4 fl4;
1da177e4
LT
1727 int encap = 0;
1728
51456b29 1729 if (!vif->dev)
1da177e4
LT
1730 goto out_free;
1731
1da177e4
LT
1732 if (vif->flags & VIFF_REGISTER) {
1733 vif->pkt_out++;
c354e124 1734 vif->bytes_out += skb->len;
cf3677ae
PE
1735 vif->dev->stats.tx_bytes += skb->len;
1736 vif->dev->stats.tx_packets++;
0c12295a 1737 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
69ebbf58 1738 goto out_free;
1da177e4 1739 }
1da177e4 1740
a8cb16dd 1741 if (vif->flags & VIFF_TUNNEL) {
31e4543d 1742 rt = ip_route_output_ports(net, &fl4, NULL,
78fbfd8a
DM
1743 vif->remote, vif->local,
1744 0, 0,
1745 IPPROTO_IPIP,
1746 RT_TOS(iph->tos), vif->link);
b23dd4fe 1747 if (IS_ERR(rt))
1da177e4
LT
1748 goto out_free;
1749 encap = sizeof(struct iphdr);
1750 } else {
31e4543d 1751 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
78fbfd8a
DM
1752 0, 0,
1753 IPPROTO_IPIP,
1754 RT_TOS(iph->tos), vif->link);
b23dd4fe 1755 if (IS_ERR(rt))
1da177e4
LT
1756 goto out_free;
1757 }
1758
d8d1f30b 1759 dev = rt->dst.dev;
1da177e4 1760
d8d1f30b 1761 if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1da177e4 1762 /* Do not fragment multicasts. Alas, IPv4 does not
a8cb16dd
ED
1763 * allow to send ICMP, so that packets will disappear
1764 * to blackhole.
1da177e4 1765 */
73186df8 1766 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1da177e4
LT
1767 ip_rt_put(rt);
1768 goto out_free;
1769 }
1770
d8d1f30b 1771 encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
1da177e4
LT
1772
1773 if (skb_cow(skb, encap)) {
e905a9ed 1774 ip_rt_put(rt);
1da177e4
LT
1775 goto out_free;
1776 }
1777
1778 vif->pkt_out++;
c354e124 1779 vif->bytes_out += skb->len;
1da177e4 1780
adf30907 1781 skb_dst_drop(skb);
d8d1f30b 1782 skb_dst_set(skb, &rt->dst);
eddc9ec5 1783 ip_decrease_ttl(ip_hdr(skb));
1da177e4
LT
1784
1785 /* FIXME: forward and output firewalls used to be called here.
a8cb16dd
ED
1786 * What do we do with netfilter? -- RR
1787 */
1da177e4 1788 if (vif->flags & VIFF_TUNNEL) {
b6a7719a 1789 ip_encap(net, skb, vif->local, vif->remote);
1da177e4 1790 /* FIXME: extra output firewall step used to be here. --RR */
2f4c02d4
PE
1791 vif->dev->stats.tx_packets++;
1792 vif->dev->stats.tx_bytes += skb->len;
1da177e4
LT
1793 }
1794
9ee6c5dc 1795 IPCB(skb)->flags |= IPSKB_FORWARDED;
1da177e4 1796
	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but also after forwarding on all output
	 * interfaces. Clearly, if the mrouter runs a multicasting program,
	 * that program should receive the packets regardless of which
	 * interface it has joined on.
	 * If we did not do this, the program would have to join on all
	 * interfaces. On the other hand, a multihomed host (or router, but
	 * not mrouter) cannot join on more than one interface - it would
	 * result in receiving multiple copies of the same packet.
	 */
	NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
		net, NULL, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
}

static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}

/* "local" means that we should preserve one skb (for local delivery) */
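/* Account the cache entry, verify that the packet really arrived on the
 * expected parent VIF (raising a rate-limited IGMPMSG_WRONGVIF assert
 * towards the daemon when it did not), then transmit one clone per
 * output VIF whose TTL threshold is below the packet's TTL; the original
 * skb is reused for the last copy unless it must survive for local
 * delivery.
 */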
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local)
{
	int true_vifi = ipmr_find_vif(mrt, skb->dev);
	int psend = -1;
	int vif, ct;

	vif = cache->mfc_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;
	cache->mfc_un.res.lastuse = jiffies;

	if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
		struct mfc_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
			goto forward;
	}

	/* Wrong interface: drop packet and (maybe) send PIM assert. */
	if (mrt->vif_table[vif].dev != skb->dev) {
		struct net_device *mdev;

		mdev = l3mdev_master_dev_rcu(mrt->vif_table[vif].dev);
		if (mdev == skb->dev)
			goto forward;

		if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround until the routing daemons are
			 * fixed is not to redistribute a packet if it was
			 * sent through the wrong interface. It means that
			 * multicast applications WILL NOT work for
			 * (S,G) entries whose default multicast route points
			 * to the wrong oif. In any case, it is not a good
			 * idea to run multicasting applications on a router.
			 */
			goto dont_forward;
		}

		cache->mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * That is bad, but otherwise we would need to move a
		     * pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/* Forward the frame */
	if (cache->mfc_origin == htonl(INADDR_ANY) &&
	    cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
		if (true_vifi >= 0 &&
		    true_vifi != cache->mfc_parent &&
		    ip_hdr(skb)->ttl >
				cache->mfc_un.res.ttls[cache->mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = cache->mfc_un.res.maxvif - 1;
	     ct >= cache->mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((cache->mfc_origin != htonl(INADDR_ANY) ||
		     ct != true_vifi) &&
		    ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					ipmr_queue_xmit(net, mrt, skb2, cache,
							psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

			if (skb2)
				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
		} else {
			ipmr_queue_xmit(net, mrt, skb, cache, psend);
			return;
		}
	}

dont_forward:
	if (!local)
		kfree_skb(skb);
}

static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph = ip_hdr(skb);
	struct flowi4 fl4 = {
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowi4_tos = RT_TOS(iph->tos),
		.flowi4_oif = (rt_is_output_route(rt) ?
			       skb->dev->ifindex : 0),
		.flowi4_iif = (rt_is_output_route(rt) ?
			       LOOPBACK_IFINDEX :
			       skb->dev->ifindex),
		.flowi4_mark = skb->mark,
	};
	struct mr_table *mrt;
	int err;

	err = ipmr_fib_lookup(net, &fl4, &mrt);
	if (err)
		return ERR_PTR(err);
	return mrt;
}

/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
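/* Copies that were already forwarded (IPSKB_FORWARDED) are only delivered
 * locally. Router-alert and IGMP traffic is diverted to ip_call_ra_chain()
 * or the mroute socket. Everything else is matched against the MFC cache
 * and either forwarded via ip_mr_forward() or queued as an unresolved
 * entry so the daemon can resolve it.
 */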
int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	struct net *net = dev_net(skb->dev);
	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
	struct mr_table *mrt;

	/* A packet that is looped back after forwarding should not be
	 * forwarded a second time, but it can still be delivered locally.
	 */
	if (IPCB(skb)->flags & IPSKB_FORWARDED)
		goto dont_forward;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt)) {
		kfree_skb(skb);
		return PTR_ERR(mrt);
	}
	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. It is very bad, because it means
			 * that we can forward NO IGMP messages.
			 */
			struct sock *mroute_sk;

			mroute_sk = rcu_dereference(mrt->mroute_sk);
			if (mroute_sk) {
				nf_reset(skb);
				raw_rcv(mroute_sk, skb);
				return 0;
			}
		}
	}

	/* already under rcu_read_lock() */
	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
	if (!cache) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
						    vif);
	}

	/* No usable cache entry */
	if (!cache) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (!skb2)
				return -ENOBUFS;
			skb = skb2;
		}

		read_lock(&mrt_lock);
		vif = ipmr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err2;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	read_lock(&mrt_lock);
	ip_mr_forward(net, mrt, skb, cache, local);
	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}

#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = igmp_hdr(skb);

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (!mrt->mroute_do_pim ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	mrt = ipmr_rt_fib_lookup(net, skb);
	if (IS_ERR(mrt))
		goto drop;
	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
		kfree_skb(skb);
	}
	return 0;
}
#endif
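
/* Fill RTA_IIF, RTA_MULTIPATH (one rtnexthop per forwarding VIF, with the
 * TTL threshold in rtnh_hops), RTA_MFC_STATS and RTA_EXPIRES for a
 * resolved cache entry; unresolved entries are flagged RTNH_F_UNRESOLVED
 * and the function returns -ENOENT.
 */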
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}

int ipmr_get_route(struct net *net, struct sk_buff *skb,
		   __be32 saddr, __be32 daddr,
		   struct rtmsg *rtm, u32 portid)
{
	struct mfc_cache *cache;
	struct mr_table *mrt;
	int err;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return -ENOENT;

	rcu_read_lock();
	cache = ipmr_cache_find(mrt, saddr, daddr);
	if (!cache && skb->dev) {
		int vif = ipmr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ipmr_cache_find_any(mrt, daddr, vif);
	}
	if (!cache) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif = -1;

		dev = skb->dev;
		read_lock(&mrt_lock);
		if (dev)
			vif = ipmr_find_vif(mrt, dev);
		if (vif < 0) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			rcu_read_unlock();
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = saddr;
		iph->daddr = daddr;
		iph->version = 0;
		err = ipmr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);
		rcu_read_unlock();
		return err;
	}

	read_lock(&mrt_lock);
	err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
	read_unlock(&mrt_lock);
	rcu_read_unlock();
	return err;
}

static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			    u32 portid, u32 seq, struct mfc_cache *c, int cmd,
			    int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len = 32;
	rtm->rtm_src_len = 32;
	rtm->rtm_tos = 0;
	rtm->rtm_table = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	if (c->mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags = 0;

	if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
	    nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
		goto nla_put_failure;
	err = __ipmr_fill_mroute(mrt, skb, c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static size_t mroute_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(4)	/* RTA_SRC */
		+ nla_total_size(4)	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		      ;

	return len;
}

static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
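
/* Dump all MFC entries: the resolved list is walked under RCU and the
 * unresolved queue under mfc_unres_lock, with cb->args[0]/args[1] holding
 * the table and entry positions so an interrupted dump can resume.
 */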
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_e = cb->args[1];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (e < s_e)
				goto next_entry;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0)
				goto done;
next_entry:
			e++;
		}
		e = 0;
		s_e = 0;

		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}

static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
};

static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
	switch (rtm_protocol) {
	case RTPROT_STATIC:
	case RTPROT_MROUTED:
		return true;
	}
	return false;
}

static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
			break;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return remaining > 0 ? -EINVAL : vifi;
}

/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
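/* RTA_SRC/RTA_DST carry the origin and group, RTA_IIF selects the parent
 * VIF, RTA_MULTIPATH carries the per-VIF TTL thresholds, RTA_TABLE picks
 * the mroute table and the presence of RTA_PREFSRC selects the proxy case.
 */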
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret)
{
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;
	struct rtmsg *rtm;
	int ret, rem;

	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy);
	if (ret < 0)
		goto out;
	rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_PREFSRC:
			ret = 1;
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}
	*mrtret = mrt;
	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	return ret;
}

/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl);
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}

#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing :
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		    : SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_list)
		rcu_read_unlock();
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group Origin Iif Pkts Bytes Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler = pim_rcv,
	.netns_ok = 1,
};
#endif

/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
fail:
	return err;
}

static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

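/* Boot-time setup: allocate the mfc_cache slab, register the pernet
 * operations and the netdevice notifier, hook the PIM protocol handler
 * (when PIM-SM v2 is configured) and register the RTNL_FAMILY_IPMR
 * rtnetlink handlers for dumping, adding and deleting multicast routes.
 */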
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      NULL, ipmr_rtm_dumproute, NULL);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, NULL);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, NULL);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}