/* IP multicast routing support for mrouted 3.6/3.8
 *
 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *	Linux Consultancy and Custom Driver Development
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Michael Chastain	: Incorrect size of copying.
 * Alan Cox		: Added the cache manager code
 * Alan Cox		: Fixed the clone/copy bug and device race.
 * Mike McLagan		: Routing by source
 * Malcolm Beattie	: Buffer handling fixes.
 * Alexey Kuznetsov	: Double buffer free and other fixes.
 * SVR Anand		: Fixed several multicast bugs and problems.
 * Alexey Kuznetsov	: Status, optimisations and more.
 * Brad Parker		: Better behaviour on mrouted upcall
 * Carlos Picoto	: PIMv1 Support
 * Pavlin Ivanov Radoslavov: PIMv2 Registers must checksum only PIM header
 *			     Relax this requirement to work with older peers.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>
#include <net/switchdev.h>
#include <linux/nospec.h>
struct ipmr_rule {
    struct fib_rule common;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */
static struct kmem_cache *mrt_cachep __read_mostly;
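
/* Illustrative sketch (not from the original file): the lockless data
 * path described above amounts to a plain RCU read-side lookup,
 * roughly:
 *
 *	rcu_read_lock();
 *	c = ipmr_cache_find(mrt, iph->saddr, iph->daddr);
 *	if (c)
 *		... forward using c ...
 *	rcu_read_unlock();
 *
 * while writers serialize under rtnl_lock/mrt_lock.
 */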
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct net_device *dev, struct sk_buff *skb,
                          struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
    list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
    struct mr_table *mrt;

    ipmr_for_each_table(mrt, net) {
        if (mrt->id == id)
            return mrt;
    }
    return NULL;
}
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
    int err;
    struct ipmr_result res;
    struct fib_lookup_arg arg = {
        .result = &res,
        .flags = FIB_LOOKUP_NOREF,
    };

    /* update flow if oif or iif point to device enslaved to l3mdev */
    l3mdev_update_flow(net, flowi4_to_flowi(flp4));

    err = fib_rules_lookup(net->ipv4.mr_rules_ops,
                           flowi4_to_flowi(flp4), 0, &arg);
    if (err < 0)
        return err;
    *mrt = res.mrt;
    return 0;
}
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
{
    struct ipmr_result *res = arg->result;
    struct mr_table *mrt;

    switch (rule->action) {
    case FR_ACT_TO_TBL:
        break;
    case FR_ACT_UNREACHABLE:
        return -ENETUNREACH;
    case FR_ACT_PROHIBIT:
        return -EACCES;
    case FR_ACT_BLACKHOLE:
    default:
        return -EINVAL;
    }

    arg->table = fib_rule_get_table(rule, arg);

    mrt = ipmr_get_table(rule->fr_net, arg->table);
    if (!mrt)
        return -EAGAIN;
    res->mrt = mrt;
    return 0;
}
static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
    return 1;
}

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
    FRA_GENERIC_POLICY,
};

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                               struct fib_rule_hdr *frh, struct nlattr **tb)
{
    return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                             struct nlattr **tb)
{
    return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
                          struct fib_rule_hdr *frh)
{
    frh->dst_len = 0;
    frh->src_len = 0;
    frh->tos     = 0;
    return 0;
}
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
    .family     = RTNL_FAMILY_IPMR,
    .rule_size  = sizeof(struct ipmr_rule),
    .addr_size  = sizeof(u32),
    .action     = ipmr_rule_action,
    .match      = ipmr_rule_match,
    .configure  = ipmr_rule_configure,
    .compare    = ipmr_rule_compare,
    .fill       = ipmr_rule_fill,
    .nlgroup    = RTNLGRP_IPV4_RULE,
    .policy     = ipmr_rule_policy,
    .owner      = THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
{
    struct fib_rules_ops *ops;
    struct mr_table *mrt;
    int err;

    ops = fib_rules_register(&ipmr_rules_ops_template, net);
    if (IS_ERR(ops))
        return PTR_ERR(ops);

    INIT_LIST_HEAD(&net->ipv4.mr_tables);

    mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
    if (IS_ERR(mrt)) {
        err = PTR_ERR(mrt);
        goto err1;
    }

    err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
    if (err < 0)
        goto err2;

    net->ipv4.mr_rules_ops = ops;
    return 0;

err2:
    ipmr_free_table(mrt);
err1:
    fib_rules_unregister(ops);
    return err;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
    struct mr_table *mrt, *next;

    rtnl_lock();
    list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
        list_del(&mrt->list);
        ipmr_free_table(mrt);
    }
    fib_rules_unregister(net->ipv4.mr_rules_ops);
    rtnl_unlock();
}
static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
    return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR);
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
    return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
    return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
}
EXPORT_SYMBOL(ipmr_rule_default);
#else
#define ipmr_for_each_table(mrt, net) \
    for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
    return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
    *mrt = net->ipv4.mrt;
    return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
    struct mr_table *mrt;

    mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
    if (IS_ERR(mrt))
        return PTR_ERR(mrt);
    net->ipv4.mrt = mrt;
    return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
    rtnl_lock();
    ipmr_free_table(net->ipv4.mrt);
    net->ipv4.mrt = NULL;
    rtnl_unlock();
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb)
{
    return 0;
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
    return 0;
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
    return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
                                const void *ptr)
{
    const struct mfc_cache_cmp_arg *cmparg = arg->key;
    struct mfc_cache *c = (struct mfc_cache *)ptr;

    return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
           cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
    .head_offset = offsetof(struct mfc_cache, mnode),
    .key_offset = offsetof(struct mfc_cache, cmparg),
    .key_len = sizeof(struct mfc_cache_cmp_arg),
    .obj_cmpfn = ipmr_hash_cmp,
    .automatic_shrinking = true,
};
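
/* Illustrative sketch (hypothetical helper, not used anywhere): with the
 * parameters above, entries sharing one (origin, mcastgrp) key hang off a
 * single rhlist bucket, so a lookup walks a short chain under RCU:
 */
static unsigned int __maybe_unused ipmr_example_count_dups(struct mr_table *mrt,
                                                           __be32 origin,
                                                           __be32 mcastgrp)
{
    struct mfc_cache_cmp_arg arg = {
        .mfc_mcastgrp = mcastgrp,
        .mfc_origin = origin,
    };
    struct rhlist_head *tmp, *list;
    struct mfc_cache *c;
    unsigned int n = 0;

    rcu_read_lock();
    /* same lookup the real code paths use below */
    list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
    rhl_for_each_entry_rcu(c, tmp, list, mnode)
        n++;
    rcu_read_unlock();
    return n;
}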
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
    struct mr_table *mrt;
    int err;

    /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
    if (id != RT_TABLE_DEFAULT && id >= 1000000000)
        return ERR_PTR(-EINVAL);

    mrt = ipmr_get_table(net, id);
    if (mrt)
        return mrt;

    mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
    if (!mrt)
        return ERR_PTR(-ENOMEM);
    write_pnet(&mrt->net, net);
    mrt->id = id;

    err = rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
    if (err) {
        kfree(mrt);
        return ERR_PTR(err);
    }
    INIT_LIST_HEAD(&mrt->mfc_cache_list);
    INIT_LIST_HEAD(&mrt->mfc_unres_queue);

    timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);

    mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
    list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
    return mrt;
}

static void ipmr_free_table(struct mr_table *mrt)
{
    del_timer_sync(&mrt->ipmr_expire_timer);
    mroute_clean_tables(mrt, true);
    rhltable_destroy(&mrt->mfc_hash);
    kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
    struct net *net = dev_net(dev);

    dev = __dev_get_by_name(net, "tunl0");
    if (dev) {
        const struct net_device_ops *ops = dev->netdev_ops;
        struct ifreq ifr;
        struct ip_tunnel_parm p;

        memset(&p, 0, sizeof(p));
        p.iph.daddr = v->vifc_rmt_addr.s_addr;
        p.iph.saddr = v->vifc_lcl_addr.s_addr;
        p.iph.version = 4;
        p.iph.ihl = 5;
        p.iph.protocol = IPPROTO_IPIP;
        sprintf(p.name, "dvmrp%d", v->vifc_vifi);
        ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

        if (ops->ndo_do_ioctl) {
            mm_segment_t oldfs = get_fs();

            set_fs(KERNEL_DS);
            ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
            set_fs(oldfs);
        }
    }
}
/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
    struct in_device *in_dev;

    ASSERT_RTNL();

    in_dev = __in_dev_get_rtnl(dev);
    if (!in_dev)
        return false;
    ipv4_devconf_setall(in_dev);
    neigh_parms_data_state_setall(in_dev->arp_parms);
    IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

    return true;
}
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
    struct net_device *dev;

    dev = __dev_get_by_name(net, "tunl0");

    if (dev) {
        const struct net_device_ops *ops = dev->netdev_ops;
        int err;
        struct ifreq ifr;
        struct ip_tunnel_parm p;

        memset(&p, 0, sizeof(p));
        p.iph.daddr = v->vifc_rmt_addr.s_addr;
        p.iph.saddr = v->vifc_lcl_addr.s_addr;
        p.iph.version = 4;
        p.iph.ihl = 5;
        p.iph.protocol = IPPROTO_IPIP;
        sprintf(p.name, "dvmrp%d", v->vifc_vifi);
        ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

        if (ops->ndo_do_ioctl) {
            mm_segment_t oldfs = get_fs();

            set_fs(KERNEL_DS);
            err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
            set_fs(oldfs);
        } else {
            err = -EOPNOTSUPP;
        }
        dev = NULL;

        if (err == 0 &&
            (dev = __dev_get_by_name(net, p.name)) != NULL) {
            dev->flags |= IFF_MULTICAST;
            if (!ipmr_init_vif_indev(dev))
                goto failure;
            if (dev_open(dev))
                goto failure;
            dev_hold(dev);
        }
    }
    return dev;

failure:
    unregister_netdevice(dev);
    return NULL;
}
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct net *net = dev_net(dev);
    struct mr_table *mrt;
    struct flowi4 fl4 = {
        .flowi4_oif  = dev->ifindex,
        .flowi4_iif  = skb->skb_iif ? : LOOPBACK_IFINDEX,
        .flowi4_mark = skb->mark,
    };
    int err;

    err = ipmr_fib_lookup(net, &fl4, &mrt);
    if (err < 0) {
        kfree_skb(skb);
        return err;
    }

    read_lock(&mrt_lock);
    dev->stats.tx_bytes += skb->len;
    dev->stats.tx_packets++;
    ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
    read_unlock(&mrt_lock);
    kfree_skb(skb);
    return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
    return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
    .ndo_start_xmit = reg_vif_xmit,
    .ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
    dev->type              = ARPHRD_PIMREG;
    dev->mtu               = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
    dev->flags             = IFF_NOARP;
    dev->netdev_ops        = &reg_vif_netdev_ops;
    dev->needs_free_netdev = true;
    dev->features          |= NETIF_F_NETNS_LOCAL;
}
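
/* Worked example of the MTU above: with ETH_DATA_LEN == 1500, the pimreg
 * device advertises 1500 - 20 (outer IP header) - 8 (PIM register header)
 * = 1472 bytes.
 */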
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
    struct net_device *dev;
    char name[IFNAMSIZ];

    if (mrt->id == RT_TABLE_DEFAULT)
        sprintf(name, "pimreg");
    else
        sprintf(name, "pimreg%u", mrt->id);

    dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
    if (!dev)
        return NULL;

    dev_net_set(dev, net);

    if (register_netdevice(dev)) {
        free_netdev(dev);
        return NULL;
    }

    if (!ipmr_init_vif_indev(dev))
        goto failure;
    if (dev_open(dev))
        goto failure;

    dev_hold(dev);
    return dev;

failure:
    unregister_netdevice(dev);
    return NULL;
}
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
                     unsigned int pimlen)
{
    struct net_device *reg_dev = NULL;
    struct iphdr *encap;

    encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
    /* Check that:
     * a. packet is really sent to a multicast group
     * b. packet is not a NULL-REGISTER
     * c. packet is not truncated
     */
    if (!ipv4_is_multicast(encap->daddr) ||
        encap->tot_len == 0 ||
        ntohs(encap->tot_len) + pimlen > skb->len)
        return 1;

    read_lock(&mrt_lock);
    if (mrt->mroute_reg_vif_num >= 0)
        reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
    read_unlock(&mrt_lock);

    if (!reg_dev)
        return 1;

    skb->mac_header = skb->network_header;
    skb_pull(skb, (u8 *)encap - skb->data);
    skb_reset_network_header(skb);
    skb->protocol = htons(ETH_P_IP);
    skb->ip_summed = CHECKSUM_NONE;

    skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

    netif_rx(skb);

    return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
    return NULL;
}
#endif
static int call_ipmr_vif_entry_notifier(struct notifier_block *nb,
                                        struct net *net,
                                        enum fib_event_type event_type,
                                        struct vif_device *vif,
                                        vifi_t vif_index, u32 tb_id)
{
    struct vif_entry_notifier_info info = {
        .info = {
            .family = RTNL_FAMILY_IPMR,
            .net = net,
        },
        .dev = vif->dev,
        .vif_index = vif_index,
        .vif_flags = vif->flags,
        .tb_id = tb_id,
    };

    return call_fib_notifier(nb, net, event_type, &info.info);
}

static int call_ipmr_vif_entry_notifiers(struct net *net,
                                         enum fib_event_type event_type,
                                         struct vif_device *vif,
                                         vifi_t vif_index, u32 tb_id)
{
    struct vif_entry_notifier_info info = {
        .info = {
            .family = RTNL_FAMILY_IPMR,
            .net = net,
        },
        .dev = vif->dev,
        .vif_index = vif_index,
        .vif_flags = vif->flags,
        .tb_id = tb_id,
    };

    ASSERT_RTNL();
    net->ipv4.ipmr_seq++;
    return call_fib_notifiers(net, event_type, &info.info);
}

static int call_ipmr_mfc_entry_notifier(struct notifier_block *nb,
                                        struct net *net,
                                        enum fib_event_type event_type,
                                        struct mfc_cache *mfc, u32 tb_id)
{
    struct mfc_entry_notifier_info info = {
        .info = {
            .family = RTNL_FAMILY_IPMR,
            .net = net,
        },
        .mfc = mfc,
        .tb_id = tb_id,
    };

    return call_fib_notifier(nb, net, event_type, &info.info);
}

static int call_ipmr_mfc_entry_notifiers(struct net *net,
                                         enum fib_event_type event_type,
                                         struct mfc_cache *mfc, u32 tb_id)
{
    struct mfc_entry_notifier_info info = {
        .info = {
            .family = RTNL_FAMILY_IPMR,
            .net = net,
        },
        .mfc = mfc,
        .tb_id = tb_id,
    };

    ASSERT_RTNL();
    net->ipv4.ipmr_seq++;
    return call_fib_notifiers(net, event_type, &info.info);
}
/**
 * vif_delete - Delete a VIF entry
 * @notify: Set to 1, if the caller is a notifier_call
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
{
    struct net *net = read_pnet(&mrt->net);
    struct vif_device *v;
    struct net_device *dev;
    struct in_device *in_dev;

    if (vifi < 0 || vifi >= mrt->maxvif)
        return -EADDRNOTAVAIL;

    v = &mrt->vif_table[vifi];

    if (VIF_EXISTS(mrt, vifi))
        call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, vifi,
                                      mrt->id);

    write_lock_bh(&mrt_lock);
    dev = v->dev;
    v->dev = NULL;

    if (!dev) {
        write_unlock_bh(&mrt_lock);
        return -EADDRNOTAVAIL;
    }

    if (vifi == mrt->mroute_reg_vif_num)
        mrt->mroute_reg_vif_num = -1;

    if (vifi + 1 == mrt->maxvif) {
        int tmp;

        for (tmp = vifi - 1; tmp >= 0; tmp--) {
            if (VIF_EXISTS(mrt, tmp))
                break;
        }
        mrt->maxvif = tmp + 1;
    }

    write_unlock_bh(&mrt_lock);

    dev_set_allmulti(dev, -1);

    in_dev = __in_dev_get_rtnl(dev);
    if (in_dev) {
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
        inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
                                    NETCONFA_MC_FORWARDING,
                                    dev->ifindex, &in_dev->cnf);
        ip_rt_multicast_event(in_dev);
    }

    if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
        unregister_netdevice_queue(dev, head);

    dev_put(dev);
    return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
    struct mfc_cache *c = container_of(head, struct mfc_cache, rcu);

    kmem_cache_free(mrt_cachep, c);
}

void ipmr_cache_free(struct mfc_cache *c)
{
    call_rcu(&c->rcu, ipmr_cache_free_rcu);
}
EXPORT_SYMBOL(ipmr_cache_free);
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
    struct net *net = read_pnet(&mrt->net);
    struct sk_buff *skb;
    struct nlmsgerr *e;

    atomic_dec(&mrt->cache_resolve_queue_len);

    while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
        if (ip_hdr(skb)->version == 0) {
            struct nlmsghdr *nlh = skb_pull(skb,
                                            sizeof(struct iphdr));
            nlh->nlmsg_type = NLMSG_ERROR;
            nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
            skb_trim(skb, nlh->nlmsg_len);
            e = nlmsg_data(nlh);
            e->error = -ETIMEDOUT;
            memset(&e->msg, 0, sizeof(e->msg));

            rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
        } else {
            kfree_skb(skb);
        }
    }

    ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */
static void ipmr_expire_process(struct timer_list *t)
{
    struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
    unsigned long now;
    unsigned long expires;
    struct mfc_cache *c, *next;

    if (!spin_trylock(&mfc_unres_lock)) {
        mod_timer(&mrt->ipmr_expire_timer, jiffies + HZ / 10);
        return;
    }

    if (list_empty(&mrt->mfc_unres_queue))
        goto out;

    now = jiffies;
    expires = 10 * HZ;

    list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
        if (time_after(c->mfc_un.unres.expires, now)) {
            unsigned long interval = c->mfc_un.unres.expires - now;
            if (interval < expires)
                expires = interval;
            continue;
        }

        list_del(&c->list);
        mroute_netlink_event(mrt, c, RTM_DELROUTE);
        ipmr_destroy_unres(mrt, c);
    }

    if (!list_empty(&mrt->mfc_unres_queue))
        mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);

out:
    spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
                                   unsigned char *ttls)
{
    int vifi;

    cache->mfc_un.res.minvif = MAXVIFS;
    cache->mfc_un.res.maxvif = 0;
    memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

    for (vifi = 0; vifi < mrt->maxvif; vifi++) {
        if (VIF_EXISTS(mrt, vifi) &&
            ttls[vifi] && ttls[vifi] < 255) {
            cache->mfc_un.res.ttls[vifi] = ttls[vifi];
            if (cache->mfc_un.res.minvif > vifi)
                cache->mfc_un.res.minvif = vifi;
            if (cache->mfc_un.res.maxvif <= vifi)
                cache->mfc_un.res.maxvif = vifi + 1;
        }
    }
    cache->mfc_un.res.lastuse = jiffies;
}
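
/* Worked example for ipmr_update_thresholds(): with maxvif == 4 and
 * ttls == {0, 2, 255, 3}, only vif 1 and vif 3 are installed (a ttl of
 * 0 or 255 means "do not forward"), leaving minvif == 1, maxvif == 4
 * and res.ttls == {255, 2, 255, 3}.
 */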
static int vif_add(struct net *net, struct mr_table *mrt,
                   struct vifctl *vifc, int mrtsock)
{
    int vifi = vifc->vifc_vifi;
    struct switchdev_attr attr = {
        .id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
    };
    struct vif_device *v = &mrt->vif_table[vifi];
    struct net_device *dev;
    struct in_device *in_dev;
    int err;

    /* Is vif busy ? */
    if (VIF_EXISTS(mrt, vifi))
        return -EADDRINUSE;

    switch (vifc->vifc_flags) {
    case VIFF_REGISTER:
        if (!ipmr_pimsm_enabled())
            return -EINVAL;
        /* Special Purpose VIF in PIM
         * All the packets will be sent to the daemon
         */
        if (mrt->mroute_reg_vif_num >= 0)
            return -EADDRINUSE;
        dev = ipmr_reg_vif(net, mrt);
        if (!dev)
            return -ENOBUFS;
        err = dev_set_allmulti(dev, 1);
        if (err) {
            unregister_netdevice(dev);
            dev_put(dev);
            return err;
        }
        break;
    case VIFF_TUNNEL:
        dev = ipmr_new_tunnel(net, vifc);
        if (!dev)
            return -ENOBUFS;
        err = dev_set_allmulti(dev, 1);
        if (err) {
            ipmr_del_tunnel(dev, vifc);
            dev_put(dev);
            return err;
        }
        break;
    case VIFF_USE_IFINDEX:
    case 0:
        if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
            dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
            if (dev && !__in_dev_get_rtnl(dev)) {
                dev_put(dev);
                return -EADDRNOTAVAIL;
            }
        } else {
            dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
        }
        if (!dev)
            return -EADDRNOTAVAIL;
        err = dev_set_allmulti(dev, 1);
        if (err) {
            dev_put(dev);
            return err;
        }
        break;
    default:
        return -EINVAL;
    }

    in_dev = __in_dev_get_rtnl(dev);
    if (!in_dev) {
        dev_put(dev);
        return -EADDRNOTAVAIL;
    }
    IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
    inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
                                dev->ifindex, &in_dev->cnf);
    ip_rt_multicast_event(in_dev);

    /* Fill in the VIF structures */

    if (!switchdev_port_attr_get(dev, &attr)) {
        memcpy(v->dev_parent_id.id, attr.u.ppid.id, attr.u.ppid.id_len);
        v->dev_parent_id.id_len = attr.u.ppid.id_len;
    } else {
        v->dev_parent_id.id_len = 0;
    }
    v->rate_limit = vifc->vifc_rate_limit;
    v->local = vifc->vifc_lcl_addr.s_addr;
    v->remote = vifc->vifc_rmt_addr.s_addr;
    v->flags = vifc->vifc_flags;
    if (!mrtsock)
        v->flags |= VIFF_STATIC;
    v->threshold = vifc->vifc_threshold;
    v->bytes_in = 0;
    v->bytes_out = 0;
    v->pkt_in = 0;
    v->pkt_out = 0;
    v->link = dev->ifindex;
    if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER))
        v->link = dev_get_iflink(dev);

    /* And finish update writing critical data */
    write_lock_bh(&mrt_lock);
    v->dev = dev;
    if (v->flags & VIFF_REGISTER)
        mrt->mroute_reg_vif_num = vifi;
    if (vifi + 1 > mrt->maxvif)
        mrt->maxvif = vifi + 1;
    write_unlock_bh(&mrt_lock);
    call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
    return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
                                         __be32 origin,
                                         __be32 mcastgrp)
{
    struct mfc_cache_cmp_arg arg = {
        .mfc_mcastgrp = mcastgrp,
        .mfc_origin = origin
    };
    struct rhlist_head *tmp, *list;
    struct mfc_cache *c;

    list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
    rhl_for_each_entry_rcu(c, tmp, list, mnode)
        return c;

    return NULL;
}
/* Look for a (*,*,oif) entry */
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
                                                    int vifi)
{
    struct mfc_cache_cmp_arg arg = {
        .mfc_mcastgrp = htonl(INADDR_ANY),
        .mfc_origin = htonl(INADDR_ANY)
    };
    struct rhlist_head *tmp, *list;
    struct mfc_cache *c;

    list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
    rhl_for_each_entry_rcu(c, tmp, list, mnode)
        if (c->mfc_un.res.ttls[vifi] < 255)
            return c;

    return NULL;
}
/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
                                             __be32 mcastgrp, int vifi)
{
    struct mfc_cache_cmp_arg arg = {
        .mfc_mcastgrp = mcastgrp,
        .mfc_origin = htonl(INADDR_ANY)
    };
    struct rhlist_head *tmp, *list;
    struct mfc_cache *c, *proxy;

    if (mcastgrp == htonl(INADDR_ANY))
        goto skip;

    list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
    rhl_for_each_entry_rcu(c, tmp, list, mnode) {
        if (c->mfc_un.res.ttls[vifi] < 255)
            return c;

        /* It's ok if the vifi is part of the static tree */
        proxy = ipmr_cache_find_any_parent(mrt, c->mfc_parent);
        if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
            return c;
    }

skip:
    return ipmr_cache_find_any_parent(mrt, vifi);
}
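
/* Resolution order used by the input path (sketch; see ip_mr_input
 * below): an exact (S,G) entry is tried first via ipmr_cache_find();
 * only if that fails does ipmr_cache_find_any() fall back to (*,G) and
 * finally to the (*,*) proxy entry, provided the arrival vif is part
 * of the static tree.
 */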
/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
                                                __be32 origin, __be32 mcastgrp,
                                                int parent)
{
    struct mfc_cache_cmp_arg arg = {
        .mfc_mcastgrp = mcastgrp,
        .mfc_origin = origin,
    };
    struct rhlist_head *tmp, *list;
    struct mfc_cache *c;

    list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
    rhl_for_each_entry_rcu(c, tmp, list, mnode)
        if (parent == -1 || parent == c->mfc_parent)
            return c;

    return NULL;
}
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
    struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);

    if (c) {
        c->mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
        c->mfc_un.res.minvif = MAXVIFS;
        refcount_set(&c->mfc_un.res.refcount, 1);
    }
    return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
    struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);

    if (c) {
        skb_queue_head_init(&c->mfc_un.unres.unresolved);
        c->mfc_un.unres.expires = jiffies + 10 * HZ;
    }
    return c;
}
/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
                               struct mfc_cache *uc, struct mfc_cache *c)
{
    struct sk_buff *skb;
    struct nlmsgerr *e;

    /* Play the pending entries through our router */
    while ((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
        if (ip_hdr(skb)->version == 0) {
            struct nlmsghdr *nlh = skb_pull(skb,
                                            sizeof(struct iphdr));

            if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
                nlh->nlmsg_len = skb_tail_pointer(skb) -
                                 (u8 *)nlh;
            } else {
                nlh->nlmsg_type = NLMSG_ERROR;
                nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                skb_trim(skb, nlh->nlmsg_len);
                e = nlmsg_data(nlh);
                e->error = -EMSGSIZE;
                memset(&e->msg, 0, sizeof(e->msg));
            }

            rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
        } else {
            ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
        }
    }
}
/* Bounce a cache query up to mrouted and netlink.
 *
 * Called under mrt_lock.
 */
static int ipmr_cache_report(struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert)
{
    const int ihl = ip_hdrlen(pkt);
    struct sock *mroute_sk;
    struct igmphdr *igmp;
    struct igmpmsg *msg;
    struct sk_buff *skb;
    int ret;

    if (assert == IGMPMSG_WHOLEPKT)
        skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
    else
        skb = alloc_skb(128, GFP_ATOMIC);

    if (!skb)
        return -ENOBUFS;

    if (assert == IGMPMSG_WHOLEPKT) {
        /* Ugly, but we have no choice with this interface.
         * Duplicate old header, fix ihl, length etc.
         * And all this only to mangle msg->im_msgtype and
         * to set msg->im_mbz to "mbz" :-)
         */
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        msg = (struct igmpmsg *)skb_network_header(skb);
        memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
        msg->im_msgtype = IGMPMSG_WHOLEPKT;
        msg->im_mbz = 0;
        msg->im_vif = mrt->mroute_reg_vif_num;
        ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
        ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
                                     sizeof(struct iphdr));
    } else {
        /* Copy the IP header */
        skb_set_network_header(skb, skb->len);
        skb_put(skb, ihl);
        skb_copy_to_linear_data(skb, pkt->data, ihl);
        /* Flag to the kernel this is a route add */
        ip_hdr(skb)->protocol = 0;
        msg = (struct igmpmsg *)skb_network_header(skb);
        msg->im_vif = vifi;
        skb_dst_set(skb, dst_clone(skb_dst(pkt)));
        /* Add our header */
        igmp = skb_put(skb, sizeof(struct igmphdr));
        igmp->type = assert;
        msg->im_msgtype = assert;
        igmp->code = 0;
        ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
        skb->transport_header = skb->network_header;
    }

    rcu_read_lock();
    mroute_sk = rcu_dereference(mrt->mroute_sk);
    if (!mroute_sk) {
        rcu_read_unlock();
        kfree_skb(skb);
        return -EINVAL;
    }

    igmpmsg_netlink_event(mrt, skb);

    /* Deliver to mrouted */
    ret = sock_queue_rcv_skb(mroute_sk, skb);
    rcu_read_unlock();
    if (ret < 0) {
        net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
        kfree_skb(skb);
    }

    return ret;
}
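
/* Userspace view of the upcall above (illustrative sketch, assuming a
 * conventional mrouted-style daemon): the daemon simply read()s its
 * IGMP raw socket and, when im_mbz is zero, treats the payload as a
 * struct igmpmsg rather than a real IGMP packet:
 *
 *	char buf[1500];
 *	ssize_t n = read(igmp_sock, buf, sizeof(buf));
 *	struct igmpmsg *m = (struct igmpmsg *)buf;
 *
 *	if (n > 0 && m->im_mbz == 0 && m->im_msgtype == IGMPMSG_NOCACHE)
 *		... install an MFC entry for (m->im_src, m->im_dst) ...
 */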
/* Queue a packet for resolution. It gets locked cache entry! */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
                                 struct sk_buff *skb, struct net_device *dev)
{
    const struct iphdr *iph = ip_hdr(skb);
    struct mfc_cache *c;
    bool found = false;
    int err;

    spin_lock_bh(&mfc_unres_lock);
    list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
        if (c->mfc_mcastgrp == iph->daddr &&
            c->mfc_origin == iph->saddr) {
            found = true;
            break;
        }
    }

    if (!found) {
        /* Create a new entry if allowable */
        if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
            (c = ipmr_cache_alloc_unres()) == NULL) {
            spin_unlock_bh(&mfc_unres_lock);

            kfree_skb(skb);
            return -ENOBUFS;
        }

        /* Fill in the new cache entry */
        c->mfc_parent = -1;
        c->mfc_origin = iph->saddr;
        c->mfc_mcastgrp = iph->daddr;

        /* Reflect first query at mrouted. */
        err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
        if (err < 0) {
            /* If the report failed throw the cache entry
             * out.
             */
            spin_unlock_bh(&mfc_unres_lock);

            ipmr_cache_free(c);
            kfree_skb(skb);
            return err;
        }

        atomic_inc(&mrt->cache_resolve_queue_len);
        list_add(&c->list, &mrt->mfc_unres_queue);
        mroute_netlink_event(mrt, c, RTM_NEWROUTE);

        if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
            mod_timer(&mrt->ipmr_expire_timer,
                      c->mfc_un.unres.expires);
    }

    /* See if we can append the packet */
    if (c->mfc_un.unres.unresolved.qlen > 3) {
        kfree_skb(skb);
        err = -ENOBUFS;
    } else {
        if (dev) {
            skb->dev = dev;
            skb->skb_iif = dev->ifindex;
        }
        skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
        err = 0;
    }

    spin_unlock_bh(&mfc_unres_lock);
    return err;
}
/* MFC cache manipulation by user space mroute daemon */

static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
    struct net *net = read_pnet(&mrt->net);
    struct mfc_cache *c;

    /* The entries are added/deleted only under RTNL */
    rcu_read_lock();
    c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
                               mfc->mfcc_mcastgrp.s_addr, parent);
    rcu_read_unlock();
    if (!c)
        return -ENOENT;
    rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
    list_del_rcu(&c->list);
    call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
    mroute_netlink_event(mrt, c, RTM_DELROUTE);
    ipmr_cache_put(c);

    return 0;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
                        struct mfcctl *mfc, int mrtsock, int parent)
{
    struct mfc_cache *uc, *c;
    bool found;
    int ret;

    if (mfc->mfcc_parent >= MAXVIFS)
        return -ENFILE;

    /* The entries are added/deleted only under RTNL */
    rcu_read_lock();
    c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
                               mfc->mfcc_mcastgrp.s_addr, parent);
    rcu_read_unlock();
    if (c) {
        write_lock_bh(&mrt_lock);
        c->mfc_parent = mfc->mfcc_parent;
        ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
        if (!mrtsock)
            c->mfc_flags |= MFC_STATIC;
        write_unlock_bh(&mrt_lock);
        call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
                                      mrt->id);
        mroute_netlink_event(mrt, c, RTM_NEWROUTE);
        return 0;
    }

    if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
        !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
        return -EINVAL;

    c = ipmr_cache_alloc();
    if (!c)
        return -ENOMEM;

    c->mfc_origin = mfc->mfcc_origin.s_addr;
    c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
    c->mfc_parent = mfc->mfcc_parent;
    ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
    if (!mrtsock)
        c->mfc_flags |= MFC_STATIC;

    ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode,
                              ipmr_rht_params);
    if (ret) {
        pr_err("ipmr: rhtable insert error %d\n", ret);
        ipmr_cache_free(c);
        return ret;
    }
    list_add_tail_rcu(&c->list, &mrt->mfc_cache_list);
    /* Check to see if we resolved a queued list. If so we
     * need to send on the frames and tidy up.
     */
    found = false;
    spin_lock_bh(&mfc_unres_lock);
    list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
        if (uc->mfc_origin == c->mfc_origin &&
            uc->mfc_mcastgrp == c->mfc_mcastgrp) {
            list_del(&uc->list);
            atomic_dec(&mrt->cache_resolve_queue_len);
            found = true;
            break;
        }
    }
    if (list_empty(&mrt->mfc_unres_queue))
        del_timer(&mrt->ipmr_expire_timer);
    spin_unlock_bh(&mfc_unres_lock);

    if (found) {
        ipmr_cache_resolve(net, mrt, uc, c);
        ipmr_cache_free(uc);
    }
    call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
    mroute_netlink_event(mrt, c, RTM_NEWROUTE);
    return 0;
}
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
    struct net *net = read_pnet(&mrt->net);
    struct mfc_cache *c, *tmp;
    LIST_HEAD(list);
    int i;

    /* Shut down all active vif entries */
    for (i = 0; i < mrt->maxvif; i++) {
        if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
            continue;
        vif_delete(mrt, i, 0, &list);
    }
    unregister_netdevice_many(&list);

    /* Wipe the cache */
    list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
        if (!all && (c->mfc_flags & MFC_STATIC))
            continue;
        rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
        list_del_rcu(&c->list);
        call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c,
                                      mrt->id);
        mroute_netlink_event(mrt, c, RTM_DELROUTE);
        ipmr_cache_put(c);
    }

    if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
        spin_lock_bh(&mfc_unres_lock);
        list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
            list_del(&c->list);
            mroute_netlink_event(mrt, c, RTM_DELROUTE);
            ipmr_destroy_unres(mrt, c);
        }
        spin_unlock_bh(&mfc_unres_lock);
    }
}
/* called from ip_ra_control(), before an RCU grace period,
 * we don't need to call synchronize_rcu() here
 */
static void mrtsock_destruct(struct sock *sk)
{
    struct net *net = sock_net(sk);
    struct mr_table *mrt;

    ASSERT_RTNL();
    ipmr_for_each_table(mrt, net) {
        if (sk == rtnl_dereference(mrt->mroute_sk)) {
            IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
            inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
                                        NETCONFA_MC_FORWARDING,
                                        NETCONFA_IFINDEX_ALL,
                                        net->ipv4.devconf_all);
            RCU_INIT_POINTER(mrt->mroute_sk, NULL);
            mroute_clean_tables(mrt, false);
        }
    }
}
/* Socket options and virtual interface manipulation. The whole
 * virtual interface system is a complete heap, but unfortunately
 * that's how BSD mrouted happens to think. Maybe one day with a proper
 * MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval,
                         unsigned int optlen)
{
    struct net *net = sock_net(sk);
    int val, ret = 0, parent = 0;
    struct mr_table *mrt;
    struct vifctl vif;
    struct mfcctl mfc;
    u32 uval;

    /* There's one exception to the lock - MRT_DONE which needs to unlock */
    rtnl_lock();
    if (sk->sk_type != SOCK_RAW ||
        inet_sk(sk)->inet_num != IPPROTO_IGMP) {
        ret = -EOPNOTSUPP;
        goto out_unlock;
    }

    mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
    if (!mrt) {
        ret = -ENOENT;
        goto out_unlock;
    }
    if (optname != MRT_INIT) {
        if (sk != rcu_access_pointer(mrt->mroute_sk) &&
            !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
            ret = -EACCES;
            goto out_unlock;
        }
    }

    switch (optname) {
    case MRT_INIT:
        if (optlen != sizeof(int)) {
            ret = -EINVAL;
            break;
        }
        if (rtnl_dereference(mrt->mroute_sk)) {
            ret = -EADDRINUSE;
            break;
        }

        ret = ip_ra_control(sk, 1, mrtsock_destruct);
        if (ret == 0) {
            rcu_assign_pointer(mrt->mroute_sk, sk);
            IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
            inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
                                        NETCONFA_MC_FORWARDING,
                                        NETCONFA_IFINDEX_ALL,
                                        net->ipv4.devconf_all);
        }
        break;
    case MRT_DONE:
        if (sk != rcu_access_pointer(mrt->mroute_sk)) {
            ret = -EACCES;
        } else {
            /* MRT_DONE is the one exception: drop the lock
             * before ip_ra_control(), which ends up taking
             * rtnl itself via mrtsock_destruct().
             */
            rtnl_unlock();
            ret = ip_ra_control(sk, 0, NULL);
            goto out;
        }
        break;
    case MRT_ADD_VIF:
    case MRT_DEL_VIF:
        if (optlen != sizeof(vif)) {
            ret = -EINVAL;
            break;
        }
        if (copy_from_user(&vif, optval, sizeof(vif))) {
            ret = -EFAULT;
            break;
        }
        if (vif.vifc_vifi >= MAXVIFS) {
            ret = -ENFILE;
            break;
        }
        if (optname == MRT_ADD_VIF) {
            ret = vif_add(net, mrt, &vif,
                          sk == rtnl_dereference(mrt->mroute_sk));
        } else {
            ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
        }
        break;
    /* Manipulate the forwarding caches. These live
     * in a sort of kernel/user symbiosis.
     */
    case MRT_ADD_MFC:
    case MRT_DEL_MFC:
        parent = -1;
        /* fall through */
    case MRT_ADD_MFC_PROXY:
    case MRT_DEL_MFC_PROXY:
        if (optlen != sizeof(mfc)) {
            ret = -EINVAL;
            break;
        }
        if (copy_from_user(&mfc, optval, sizeof(mfc))) {
            ret = -EFAULT;
            break;
        }
        if (parent == 0)
            parent = mfc.mfcc_parent;
        if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
            ret = ipmr_mfc_delete(mrt, &mfc, parent);
        else
            ret = ipmr_mfc_add(net, mrt, &mfc,
                               sk == rtnl_dereference(mrt->mroute_sk),
                               parent);
        break;
    /* Control PIM assert. */
    case MRT_ASSERT:
        if (optlen != sizeof(val)) {
            ret = -EINVAL;
            break;
        }
        if (get_user(val, (int __user *)optval)) {
            ret = -EFAULT;
            break;
        }
        mrt->mroute_do_assert = val;
        break;
    case MRT_PIM:
        if (!ipmr_pimsm_enabled()) {
            ret = -ENOPROTOOPT;
            break;
        }
        if (optlen != sizeof(val)) {
            ret = -EINVAL;
            break;
        }
        if (get_user(val, (int __user *)optval)) {
            ret = -EFAULT;
            break;
        }

        val = !!val;
        if (val != mrt->mroute_do_pim) {
            mrt->mroute_do_pim = val;
            mrt->mroute_do_assert = val;
        }
        break;
    case MRT_TABLE:
        if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
            ret = -ENOPROTOOPT;
            break;
        }
        if (optlen != sizeof(uval)) {
            ret = -EINVAL;
            break;
        }
        if (get_user(uval, (u32 __user *)optval)) {
            ret = -EFAULT;
            break;
        }

        if (sk == rtnl_dereference(mrt->mroute_sk)) {
            ret = -EBUSY;
        } else {
            mrt = ipmr_new_table(net, uval);
            if (IS_ERR(mrt))
                ret = PTR_ERR(mrt);
            else
                raw_sk(sk)->ipmr_table = uval;
        }
        break;
    /* Spurious command, or MRT_VERSION which you cannot set. */
    default:
        ret = -ENOPROTOOPT;
    }
out_unlock:
    rtnl_unlock();
out:
    return ret;
}
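
/* Userspace sketch of this interface (illustrative only): a routing
 * daemon becomes the mroute socket and adds a vif roughly like:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int one = 1;
 *
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *
 *	struct vifctl vc = { .vifc_vifi = 0, .vifc_threshold = 1 };
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 * (addresses here are documentation examples, not anything required
 * by the kernel)
 */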
/* Getsock opt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
                         int __user *optlen)
{
    int olr;
    int val;
    struct net *net = sock_net(sk);
    struct mr_table *mrt;

    if (sk->sk_type != SOCK_RAW ||
        inet_sk(sk)->inet_num != IPPROTO_IGMP)
        return -EOPNOTSUPP;

    mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
    if (!mrt)
        return -ENOENT;

    switch (optname) {
    case MRT_VERSION:
        val = 0x0305;
        break;
    case MRT_PIM:
        if (!ipmr_pimsm_enabled())
            return -ENOPROTOOPT;
        val = mrt->mroute_do_pim;
        break;
    case MRT_ASSERT:
        val = mrt->mroute_do_assert;
        break;
    default:
        return -ENOPROTOOPT;
    }

    if (get_user(olr, optlen))
        return -EFAULT;
    olr = min_t(unsigned int, olr, sizeof(int));
    if (olr < 0)
        return -EINVAL;
    if (put_user(olr, optlen))
        return -EFAULT;
    if (copy_to_user(optval, &val, olr))
        return -EFAULT;
    return 0;
}
/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
    struct sioc_sg_req sr;
    struct sioc_vif_req vr;
    struct vif_device *vif;
    struct mfc_cache *c;
    struct net *net = sock_net(sk);
    struct mr_table *mrt;

    mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
    if (!mrt)
        return -ENOENT;

    switch (cmd) {
    case SIOCGETVIFCNT:
        if (copy_from_user(&vr, arg, sizeof(vr)))
            return -EFAULT;
        if (vr.vifi >= mrt->maxvif)
            return -EINVAL;
        vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
        read_lock(&mrt_lock);
        vif = &mrt->vif_table[vr.vifi];
        if (VIF_EXISTS(mrt, vr.vifi)) {
            vr.icount = vif->pkt_in;
            vr.ocount = vif->pkt_out;
            vr.ibytes = vif->bytes_in;
            vr.obytes = vif->bytes_out;
            read_unlock(&mrt_lock);

            if (copy_to_user(arg, &vr, sizeof(vr)))
                return -EFAULT;
            return 0;
        }
        read_unlock(&mrt_lock);
        return -EADDRNOTAVAIL;
    case SIOCGETSGCNT:
        if (copy_from_user(&sr, arg, sizeof(sr)))
            return -EFAULT;

        rcu_read_lock();
        c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
        if (c) {
            sr.pktcnt = c->mfc_un.res.pkt;
            sr.bytecnt = c->mfc_un.res.bytes;
            sr.wrong_if = c->mfc_un.res.wrong_if;
            rcu_read_unlock();

            if (copy_to_user(arg, &sr, sizeof(sr)))
                return -EFAULT;
            return 0;
        }
        rcu_read_unlock();
        return -EADDRNOTAVAIL;
    default:
        return -ENOIOCTLCMD;
    }
}
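
/* Usage sketch for SIOCGETSGCNT (illustrative only; addresses are
 * documentation examples):
 *
 *	struct sioc_sg_req sr = { 0 };
 *
 *	sr.src.s_addr = inet_addr("198.51.100.7");
 *	sr.grp.s_addr = inet_addr("232.1.1.1");
 *	if (ioctl(igmp_sock, SIOCGETSGCNT, &sr) == 0)
 *		printf("pkts=%lu bytes=%lu wrong_if=%lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */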
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
    struct in_addr src;
    struct in_addr grp;
    compat_ulong_t pktcnt;
    compat_ulong_t bytecnt;
    compat_ulong_t wrong_if;
};

struct compat_sioc_vif_req {
    vifi_t vifi;        /* Which iface */
    compat_ulong_t icount;
    compat_ulong_t ocount;
    compat_ulong_t ibytes;
    compat_ulong_t obytes;
};

int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
    struct compat_sioc_sg_req sr;
    struct compat_sioc_vif_req vr;
    struct vif_device *vif;
    struct mfc_cache *c;
    struct net *net = sock_net(sk);
    struct mr_table *mrt;

    mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
    if (!mrt)
        return -ENOENT;

    switch (cmd) {
    case SIOCGETVIFCNT:
        if (copy_from_user(&vr, arg, sizeof(vr)))
            return -EFAULT;
        if (vr.vifi >= mrt->maxvif)
            return -EINVAL;
        vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
        read_lock(&mrt_lock);
        vif = &mrt->vif_table[vr.vifi];
        if (VIF_EXISTS(mrt, vr.vifi)) {
            vr.icount = vif->pkt_in;
            vr.ocount = vif->pkt_out;
            vr.ibytes = vif->bytes_in;
            vr.obytes = vif->bytes_out;
            read_unlock(&mrt_lock);

            if (copy_to_user(arg, &vr, sizeof(vr)))
                return -EFAULT;
            return 0;
        }
        read_unlock(&mrt_lock);
        return -EADDRNOTAVAIL;
    case SIOCGETSGCNT:
        if (copy_from_user(&sr, arg, sizeof(sr)))
            return -EFAULT;

        rcu_read_lock();
        c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
        if (c) {
            sr.pktcnt = c->mfc_un.res.pkt;
            sr.bytecnt = c->mfc_un.res.bytes;
            sr.wrong_if = c->mfc_un.res.wrong_if;
            rcu_read_unlock();

            if (copy_to_user(arg, &sr, sizeof(sr)))
                return -EFAULT;
            return 0;
        }
        rcu_read_unlock();
        return -EADDRNOTAVAIL;
    default:
        return -ENOIOCTLCMD;
    }
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
    struct net_device *dev = netdev_notifier_info_to_dev(ptr);
    struct net *net = dev_net(dev);
    struct mr_table *mrt;
    struct vif_device *v;
    int ct;

    if (event != NETDEV_UNREGISTER)
        return NOTIFY_DONE;

    ipmr_for_each_table(mrt, net) {
        v = &mrt->vif_table[0];
        for (ct = 0; ct < mrt->maxvif; ct++, v++) {
            if (v->dev == dev)
                vif_delete(mrt, ct, 1, NULL);
        }
    }
    return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier = {
    .notifier_call = ipmr_device_event,
};
/* Encapsulate a packet by attaching a valid IPIP header to it.
 * This avoids tunnel drivers and other mess and gives us the speed so
 * important for multicast video.
 */
static void ip_encap(struct net *net, struct sk_buff *skb,
                     __be32 saddr, __be32 daddr)
{
    struct iphdr *iph;
    const struct iphdr *old_iph = ip_hdr(skb);

    skb_push(skb, sizeof(struct iphdr));
    skb->transport_header = skb->network_header;
    skb_reset_network_header(skb);
    iph = ip_hdr(skb);

    iph->version  = 4;
    iph->tos      = old_iph->tos;
    iph->ttl      = old_iph->ttl;
    iph->frag_off = 0;
    iph->daddr    = daddr;
    iph->saddr    = saddr;
    iph->protocol = IPPROTO_IPIP;
    iph->ihl      = 5;
    iph->tot_len  = htons(skb->len);
    ip_select_ident(net, skb, NULL);
    ip_send_check(iph);

    memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
    nf_reset(skb);
}
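
/* Resulting layout after ip_encap() (sketch): a fresh 20-byte IPIP
 * header, with saddr/daddr taken from the vif's local/remote tunnel
 * endpoints, is pushed in front of the untouched original datagram:
 *
 *	[outer IP, proto IPPROTO_IPIP][original IP header][payload]
 */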
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
                                      struct sk_buff *skb)
{
    struct ip_options *opt = &(IPCB(skb)->opt);

    IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
    IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

    if (unlikely(opt->optlen))
        ip_forward_options(skb);

    return dst_output(net, sk, skb);
}
#ifdef CONFIG_NET_SWITCHDEV
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
                                   int in_vifi, int out_vifi)
{
    struct vif_device *out_vif = &mrt->vif_table[out_vifi];
    struct vif_device *in_vif = &mrt->vif_table[in_vifi];

    if (!skb->offload_mr_fwd_mark)
        return false;
    if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
        return false;
    return netdev_phys_item_id_same(&out_vif->dev_parent_id,
                                    &in_vif->dev_parent_id);
}
#else
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
                                   int in_vifi, int out_vifi)
{
    return false;
}
#endif
/* Processing handlers for ipmr_forward */

static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
                            int in_vifi, struct sk_buff *skb,
                            struct mfc_cache *c, int vifi)
{
    const struct iphdr *iph = ip_hdr(skb);
    struct vif_device *vif = &mrt->vif_table[vifi];
    struct net_device *dev;
    struct rtable *rt;
    struct flowi4 fl4;
    int encap = 0;

    if (!vif->dev)
        goto out_free;

    if (vif->flags & VIFF_REGISTER) {
        vif->pkt_out++;
        vif->bytes_out += skb->len;
        vif->dev->stats.tx_bytes += skb->len;
        vif->dev->stats.tx_packets++;
        ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
        goto out_free;
    }

    if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
        goto out_free;

    if (vif->flags & VIFF_TUNNEL) {
        rt = ip_route_output_ports(net, &fl4, NULL,
                                   vif->remote, vif->local,
                                   0, 0,
                                   IPPROTO_IPIP,
                                   RT_TOS(iph->tos), vif->link);
        if (IS_ERR(rt))
            goto out_free;
        encap = sizeof(struct iphdr);
    } else {
        rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
                                   0, 0,
                                   IPPROTO_IPIP,
                                   RT_TOS(iph->tos), vif->link);
        if (IS_ERR(rt))
            goto out_free;
    }

    dev = rt->dst.dev;

    if (skb->len + encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
        /* Do not fragment multicasts. Alas, IPv4 does not
         * allow to send ICMP, so that packets will disappear
         * to blackhole.
         */
        IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
        ip_rt_put(rt);
        goto out_free;
    }

    encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;

    if (skb_cow(skb, encap)) {
        ip_rt_put(rt);
        goto out_free;
    }

    vif->pkt_out++;
    vif->bytes_out += skb->len;

    skb_dst_drop(skb);
    skb_dst_set(skb, &rt->dst);
    ip_decrease_ttl(ip_hdr(skb));

    /* FIXME: forward and output firewalls used to be called here.
     * What do we do with netfilter? -- RR
     */
    if (vif->flags & VIFF_TUNNEL) {
        ip_encap(net, skb, vif->local, vif->remote);
        /* FIXME: extra output firewall step used to be here. --RR */
        vif->dev->stats.tx_packets++;
        vif->dev->stats.tx_bytes += skb->len;
    }

    IPCB(skb)->flags |= IPSKB_FORWARDED;

    /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
     * not only before forwarding, but after forwarding on all output
     * interfaces. It is clear, if mrouter runs a multicasting
     * program, it should receive packets not depending to what interface
     * program is joined.
     * If we will not make it, the program will have to join on all
     * interfaces. On the other hand, multihoming host (or router, but
     * not mrouter) cannot join to more than one interface - it will
     * result in receiving multiple packets.
     */
    NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
            net, NULL, skb, skb->dev, dev,
            ipmr_forward_finish);
    return;

out_free:
    kfree_skb(skb);
}
static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
    int ct;

    for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
        if (mrt->vif_table[ct].dev == dev)
            return ct;
    }
    return -1;
}
/* "local" means that we should preserve one skb (for local delivery) */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct net_device *dev, struct sk_buff *skb,
                          struct mfc_cache *cache, int local)
{
    int true_vifi = ipmr_find_vif(mrt, dev);
    int psend = -1;
    int vif, ct;

    vif = cache->mfc_parent;
    cache->mfc_un.res.pkt++;
    cache->mfc_un.res.bytes += skb->len;
    cache->mfc_un.res.lastuse = jiffies;

    if (cache->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
        struct mfc_cache *cache_proxy;

        /* For an (*,G) entry, we only check that the incoming
         * interface is part of the static tree.
         */
        cache_proxy = ipmr_cache_find_any_parent(mrt, vif);
        if (cache_proxy &&
            cache_proxy->mfc_un.res.ttls[true_vifi] < 255)
            goto forward;
    }

    /* Wrong interface: drop packet and (maybe) send PIM assert. */
    if (mrt->vif_table[vif].dev != dev) {
        if (rt_is_output_route(skb_rtable(skb))) {
            /* It is our own packet, looped back.
             * Very complicated situation...
             *
             * The best workaround until routing daemons will be
             * fixed is not to redistribute packet, if it was
             * send through wrong interface. It means, that
             * multicast applications WILL NOT work for
             * (S,G), which have default multicast route pointing
             * to wrong oif. In any case, it is not a good
             * idea to use multicasting applications on router.
             */
            goto dont_forward;
        }

        cache->mfc_un.res.wrong_if++;

        if (true_vifi >= 0 && mrt->mroute_do_assert &&
            /* pimsm uses asserts, when switching from RPT to SPT,
             * so that we cannot check that packet arrived on an oif.
             * It is bad, but otherwise we would need to move pretty
             * large chunk of pimd to kernel. Ough... --ANK
             */
            (mrt->mroute_do_pim ||
             cache->mfc_un.res.ttls[true_vifi] < 255) &&
            time_after(jiffies,
                       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
            cache->mfc_un.res.last_assert = jiffies;
            ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
        }
        goto dont_forward;
    }

forward:
    mrt->vif_table[vif].pkt_in++;
    mrt->vif_table[vif].bytes_in += skb->len;

    /* Forward the frame */
    if (cache->mfc_origin == htonl(INADDR_ANY) &&
        cache->mfc_mcastgrp == htonl(INADDR_ANY)) {
        if (true_vifi >= 0 &&
            true_vifi != cache->mfc_parent &&
            ip_hdr(skb)->ttl >
                cache->mfc_un.res.ttls[cache->mfc_parent]) {
            /* It's an (*,*) entry and the packet is not coming from
             * the upstream: forward the packet to the upstream
             * only.
             */
            psend = cache->mfc_parent;
            goto last_forward;
        }
        goto dont_forward;
    }
    for (ct = cache->mfc_un.res.maxvif - 1;
         ct >= cache->mfc_un.res.minvif; ct--) {
        /* For (*,G) entry, don't forward to the incoming interface */
        if ((cache->mfc_origin != htonl(INADDR_ANY) ||
             ct != true_vifi) &&
            ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
            if (psend != -1) {
                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                if (skb2)
                    ipmr_queue_xmit(net, mrt, true_vifi,
                                    skb2, cache, psend);
            }
            psend = ct;
        }
    }
last_forward:
    if (psend != -1) {
        if (local) {
            struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

            if (skb2)
                ipmr_queue_xmit(net, mrt, true_vifi, skb2,
                                cache, psend);
        } else {
            ipmr_queue_xmit(net, mrt, true_vifi, skb, cache, psend);
            return;
        }
    }

dont_forward:
    if (!local)
        kfree_skb(skb);
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
    struct rtable *rt = skb_rtable(skb);
    struct iphdr *iph = ip_hdr(skb);
    struct flowi4 fl4 = {
        .daddr = iph->daddr,
        .saddr = iph->saddr,
        .flowi4_tos = RT_TOS(iph->tos),
        .flowi4_oif = (rt_is_output_route(rt) ?
                       skb->dev->ifindex : 0),
        .flowi4_iif = (rt_is_output_route(rt) ?
                       LOOPBACK_IFINDEX :
                       skb->dev->ifindex),
        .flowi4_mark = skb->mark,
    };
    struct mr_table *mrt;
    int err;

    err = ipmr_fib_lookup(net, &fl4, &mrt);
    if (err)
        return ERR_PTR(err);
    return mrt;
}
/* Multicast packets for forwarding arrive here
 * Called with rcu_read_lock();
 */
int ip_mr_input(struct sk_buff *skb)
{
    struct mfc_cache *cache;
    struct net *net = dev_net(skb->dev);
    int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
    struct mr_table *mrt;
    struct net_device *dev;

    /* skb->dev passed in is the loX master dev for vrfs.
     * As there are no vifs associated with loopback devices,
     * get the proper interface that does have a vif associated with it.
     */
    dev = skb->dev;
    if (netif_is_l3_master(skb->dev)) {
        dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
        if (!dev) {
            kfree_skb(skb);
            return -ENODEV;
        }
    }

    /* Packet is looped back after forward, it should not be
     * forwarded second time, but still can be delivered locally.
     */
    if (IPCB(skb)->flags & IPSKB_FORWARDED)
        goto dont_forward;

    mrt = ipmr_rt_fib_lookup(net, skb);
    if (IS_ERR(mrt)) {
        kfree_skb(skb);
        return PTR_ERR(mrt);
    }
    if (!local) {
        if (IPCB(skb)->opt.router_alert) {
            if (ip_call_ra_chain(skb))
                return 0;
        } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
            /* IGMPv1 (and broken IGMPv2 implementations sort of
             * Cisco IOS <= 11.2(8)) do not put router alert
             * option to IGMP packets destined to routable
             * groups. It is very bad, because it means
             * that we can forward NO IGMP messages.
             */
            struct sock *mroute_sk;

            mroute_sk = rcu_dereference(mrt->mroute_sk);
            if (mroute_sk) {
                nf_reset(skb);
                raw_rcv(mroute_sk, skb);
                return 0;
            }
        }
    }

    /* already under rcu_read_lock() */
    cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
    if (!cache) {
        int vif = ipmr_find_vif(mrt, dev);

        if (vif >= 0)
            cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
                                        vif);
    }

    /* No usable cache entry */
    if (!cache) {
        int vif;

        if (local) {
            struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
            ip_local_deliver(skb);
            if (!skb2)
                return -ENOBUFS;
            skb = skb2;
        }

        read_lock(&mrt_lock);
        vif = ipmr_find_vif(mrt, dev);
        if (vif >= 0) {
            int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
            read_unlock(&mrt_lock);

            return err2;
        }
        read_unlock(&mrt_lock);
        kfree_skb(skb);
        return -ENODEV;
    }

    read_lock(&mrt_lock);
    ip_mr_forward(net, mrt, dev, skb, cache, local);
    read_unlock(&mrt_lock);

    if (local)
        return ip_local_deliver(skb);

    return 0;

dont_forward:
    if (local)
        return ip_local_deliver(skb);
    kfree_skb(skb);
    return 0;
}
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
    struct igmphdr *pim;
    struct net *net = dev_net(skb->dev);
    struct mr_table *mrt;

    if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
        goto drop;

    pim = igmp_hdr(skb);

    mrt = ipmr_rt_fib_lookup(net, skb);
    if (IS_ERR(mrt))
        goto drop;
    if (!mrt->mroute_do_pim ||
        pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
        goto drop;

    if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
        kfree_skb(skb);
    }
    return 0;
}
#endif

#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
    struct pimreghdr *pim;
    struct net *net = dev_net(skb->dev);
    struct mr_table *mrt;

    if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
        goto drop;

    pim = (struct pimreghdr *)skb_transport_header(skb);
    if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
        (pim->flags & PIM_NULL_REGISTER) ||
        (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
         csum_fold(skb_checksum(skb, 0, skb->len, 0))))
        goto drop;

    mrt = ipmr_rt_fib_lookup(net, skb);
    if (IS_ERR(mrt))
        goto drop;
    if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
        kfree_skb(skb);
    }
    return 0;
}
#endif
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                              struct mfc_cache *c, struct rtmsg *rtm)
{
    struct rta_mfc_stats mfcs;
    struct nlattr *mp_attr;
    struct rtnexthop *nhp;
    unsigned long lastuse;
    int ct;

    /* If cache is unresolved, don't try to parse IIF and OIF */
    if (c->mfc_parent >= MAXVIFS) {
        rtm->rtm_flags |= RTNH_F_UNRESOLVED;
        return -ENOENT;
    }

    if (VIF_EXISTS(mrt, c->mfc_parent) &&
        nla_put_u32(skb, RTA_IIF, mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
        return -EMSGSIZE;

    if (c->mfc_flags & MFC_OFFLOAD)
        rtm->rtm_flags |= RTNH_F_OFFLOAD;

    if (!(mp_attr = nla_nest_start(skb, RTA_MULTIPATH)))
        return -EMSGSIZE;

    for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
        if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
            if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp)))) {
                nla_nest_cancel(skb, mp_attr);
                return -EMSGSIZE;
            }

            nhp->rtnh_flags = 0;
            nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
            nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
            nhp->rtnh_len = sizeof(*nhp);
        }
    }

    nla_nest_end(skb, mp_attr);

    lastuse = READ_ONCE(c->mfc_un.res.lastuse);
    lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

    mfcs.mfcs_packets = c->mfc_un.res.pkt;
    mfcs.mfcs_bytes = c->mfc_un.res.bytes;
    mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
    if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
        nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
                          RTA_PAD))
        return -EMSGSIZE;

    rtm->rtm_type = RTN_MULTICAST;
    return 1;
}
int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
                   struct rtmsg *rtm, u32 portid)
{
    struct mfc_cache *cache;
    struct mr_table *mrt;
    int err;

    mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
    if (!mrt)
        return -ENOENT;

    rcu_read_lock();
    cache = ipmr_cache_find(mrt, saddr, daddr);
    if (!cache && skb->dev) {
        int vif = ipmr_find_vif(mrt, skb->dev);

        if (vif >= 0)
            cache = ipmr_cache_find_any(mrt, daddr, vif);
    }
    if (!cache) {
        struct sk_buff *skb2;
        struct iphdr *iph;
        struct net_device *dev;
        int vif = -1;

        read_lock(&mrt_lock);
        dev = skb->dev;
        if (dev)
            vif = ipmr_find_vif(mrt, dev);
        if (vif < 0) {
            read_unlock(&mrt_lock);
            rcu_read_unlock();
            return -ENODEV;
        }
        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (!skb2) {
            read_unlock(&mrt_lock);
            rcu_read_unlock();
            return -ENOMEM;
        }

        NETLINK_CB(skb2).portid = portid;
        skb_push(skb2, sizeof(struct iphdr));
        skb_reset_network_header(skb2);
        iph = ip_hdr(skb2);
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->saddr = saddr;
        iph->daddr = daddr;
        iph->version = 0;
        err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
        read_unlock(&mrt_lock);
        rcu_read_unlock();
        return err;
    }

    read_lock(&mrt_lock);
    err = __ipmr_fill_mroute(mrt, skb, cache, rtm);
    read_unlock(&mrt_lock);
    rcu_read_unlock();
    return err;
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
                            u32 portid, u32 seq, struct mfc_cache *c, int cmd,
                            int flags)
{
    struct nlmsghdr *nlh;
    struct rtmsg *rtm;
    int err;

    nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
    if (!nlh)
        return -EMSGSIZE;

    rtm = nlmsg_data(nlh);
    rtm->rtm_family   = RTNL_FAMILY_IPMR;
    rtm->rtm_dst_len  = 32;
    rtm->rtm_src_len  = 32;
    rtm->rtm_tos      = 0;
    rtm->rtm_table    = mrt->id;
    if (nla_put_u32(skb, RTA_TABLE, mrt->id))
        goto nla_put_failure;
    rtm->rtm_type     = RTN_MULTICAST;
    rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
    if (c->mfc_flags & MFC_STATIC)
        rtm->rtm_protocol = RTPROT_STATIC;
    else
        rtm->rtm_protocol = RTPROT_MROUTED;
    rtm->rtm_flags    = 0;

    if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
        nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
        goto nla_put_failure;
    err = __ipmr_fill_mroute(mrt, skb, c, rtm);
    /* do not break the dump if cache is unresolved */
    if (err < 0 && err != -ENOENT)
        goto nla_put_failure;

    nlmsg_end(skb, nlh);
    return 0;

nla_put_failure:
    nlmsg_cancel(skb, nlh);
    return -EMSGSIZE;
}
, int maxvif
)
2477 NLMSG_ALIGN(sizeof(struct rtmsg
))
2478 + nla_total_size(4) /* RTA_TABLE */
2479 + nla_total_size(4) /* RTA_SRC */
2480 + nla_total_size(4) /* RTA_DST */
2485 + nla_total_size(4) /* RTA_IIF */
2486 + nla_total_size(0) /* RTA_MULTIPATH */
2487 + maxvif
* NLA_ALIGN(sizeof(struct rtnexthop
))
2489 + nla_total_size_64bit(sizeof(struct rta_mfc_stats
))
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd)
{
    struct net *net = read_pnet(&mrt->net);
    struct sk_buff *skb;
    int err = -ENOBUFS;

    skb = nlmsg_new(mroute_msgsize(mfc->mfc_parent >= MAXVIFS, mrt->maxvif),
                    GFP_ATOMIC);
    if (!skb)
        goto errout;

    err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
    if (err < 0)
        goto errout;

    rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
    return;

errout:
    kfree_skb(skb);
    if (err < 0)
        rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
{
    size_t len =
        NLMSG_ALIGN(sizeof(struct rtgenmsg))
        + nla_total_size(1)                     /* IPMRA_CREPORT_MSGTYPE */
        + nla_total_size(4)                     /* IPMRA_CREPORT_VIF_ID */
        + nla_total_size(4)                     /* IPMRA_CREPORT_SRC_ADDR */
        + nla_total_size(4)                     /* IPMRA_CREPORT_DST_ADDR */
                                                /* IPMRA_CREPORT_PKT */
        + nla_total_size(payloadlen)
        ;

    return len;
}
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct igmpmsg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct igmpmsg);
	msg = (struct igmpmsg *)skb_network_header(pkt);

	skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
	if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
	    nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
			    msg->im_src.s_addr) ||
	    nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
			    msg->im_dst.s_addr))
		goto nla_put_failure;

	nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
}
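/* Hypothetical userspace sketch (compiled out, not part of this file):
 * a listener for the RTM_NEWCACHEREPORT notifications generated above,
 * subscribing to RTNLGRP_IPV4_MROUTE_R on a rtnetlink socket.
 * mroute_netlink_event() publishes to RTNLGRP_IPV4_MROUTE the same way.
 */
#if 0
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int open_creport_listener(void)
{
	unsigned int grp = RTNLGRP_IPV4_MROUTE_R;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;
	/* join the cache-report multicast group */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
		       &grp, sizeof(grp)) < 0)
		return -1;
	return fd;	/* recv() now yields RTM_NEWCACHEREPORT messages */
}
#endif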
static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX + 1];
	struct sk_buff *skb = NULL;
	struct mfc_cache *cache;
	struct mr_table *mrt;
	struct rtmsg *rtm;
	__be32 src, grp;
	u32 tableid;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX,
			  rtm_ipv4_policy, extack);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;

	mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
	if (!mrt) {
		err = -ENOENT;
		goto errout_free;
	}

	/* entries are added/deleted only under RTNL */
	rcu_read_lock();
	cache = ipmr_cache_find(mrt, src, grp);
	rcu_read_unlock();
	if (!cache) {
		err = -ENOENT;
		goto errout_free;
	}

	skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
	if (!skb) {
		err = -ENOBUFS;
		goto errout_free;
	}

	err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
			       nlh->nlmsg_seq, cache,
			       RTM_NEWROUTE, 0);
	if (err < 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
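/* Hypothetical userspace sketch (compiled out): building the request that
 * the handler above answers. Setting NLM_F_DUMP instead (and omitting the
 * addresses) hits ipmr_rtm_dumproute() below.
 */
#if 0
#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static void add_rta_u32(struct nlmsghdr *nlh, unsigned short type, __u32 val)
{
	struct rtattr *rta = (struct rtattr *)((char *)nlh +
					       NLMSG_ALIGN(nlh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(sizeof(val));
	memcpy(RTA_DATA(rta), &val, sizeof(val));
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_LENGTH(sizeof(val));
}

/* buf must be large enough for the header, rtmsg and two u32 attributes */
static void build_sg_query(void *buf, __u32 src, __u32 grp)
{
	struct nlmsghdr *nlh = buf;
	struct rtmsg *rtm = NLMSG_DATA(nlh);

	memset(buf, 0, NLMSG_SPACE(sizeof(*rtm)));
	nlh->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm));
	nlh->nlmsg_type = RTM_GETROUTE;
	nlh->nlmsg_flags = NLM_F_REQUEST;
	rtm->rtm_family = RTNL_FAMILY_IPMR;
	rtm->rtm_dst_len = 32;
	rtm->rtm_src_len = 32;
	add_rta_u32(nlh, RTA_SRC, src);	/* addresses in network byte order */
	add_rta_u32(nlh, RTA_DST, grp);
}
#endif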
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mfc_cache *mfc;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;

	s_t = cb->args[0];
	s_e = cb->args[1];

	rcu_read_lock();
	ipmr_for_each_table(mrt, net) {
		if (t < s_t)
			goto next_table;
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (e < s_e)
				goto next_entry;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0)
				goto done;
next_entry:
			e++;
		}
		e = 0;
		s_e = 0;

		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (ipmr_fill_mroute(mrt, skb,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     mfc, RTM_NEWROUTE,
					     NLM_F_MULTI) < 0) {
				spin_unlock_bh(&mfc_unres_lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(&mfc_unres_lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}
static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
	[RTA_SRC]	= { .type = NLA_U32 },
	[RTA_DST]	= { .type = NLA_U32 },
	[RTA_IIF]	= { .type = NLA_U32 },
	[RTA_TABLE]	= { .type = NLA_U32 },
	[RTA_MULTIPATH]	= { .len = sizeof(struct rtnexthop) },
};
static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
	switch (rtm_protocol) {
	case RTPROT_UNSPEC:
	case RTPROT_MROUTED:
		return true;
	}
	return false;
}
static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
	struct rtnexthop *rtnh = nla_data(nla);
	int remaining = nla_len(nla), vifi = 0;

	while (rtnh_ok(rtnh, remaining)) {
		mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
		if (++vifi == MAXVIFS)
			break;
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return remaining > 0 ? -EINVAL : vifi;
}
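/* The encoding expected above is positional: the i-th rtnexthop inside
 * RTA_MULTIPATH carries the TTL threshold for VIF i in rtnh_hops, and
 * rtnh_ifindex is unused. A hypothetical sketch of the sender side
 * (compiled out):
 */
#if 0
static void fill_vif_ttls(struct rtnexthop *rtnh, const __u8 *ttls, int nvifs)
{
	int i;

	for (i = 0; i < nvifs; i++) {
		rtnh[i].rtnh_len = sizeof(struct rtnexthop);
		rtnh[i].rtnh_flags = 0;
		rtnh[i].rtnh_hops = ttls[i];	/* TTL threshold for VIF i */
		rtnh[i].rtnh_ifindex = 0;
	}
}
#endif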
/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
			    struct mfcctl *mfcc, int *mrtsock,
			    struct mr_table **mrtret,
			    struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL;
	u32 tblid = RT_TABLE_DEFAULT;
	struct mr_table *mrt;
	struct nlattr *attr;
	struct rtmsg *rtm;
	int ret, rem;

	ret = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipmr_policy,
			     extack);
	if (ret < 0)
		goto out;
	rtm = nlmsg_data(nlh);

	ret = -EINVAL;
	if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
	    rtm->rtm_type != RTN_MULTICAST ||
	    rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
	    !ipmr_rtm_validate_proto(rtm->rtm_protocol))
		goto out;

	memset(mfcc, 0, sizeof(*mfcc));
	mfcc->mfcc_parent = -1;
	ret = 0;
	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
		switch (nla_type(attr)) {
		case RTA_SRC:
			mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
			break;
		case RTA_DST:
			mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
			break;
		case RTA_IIF:
			dev = __dev_get_by_index(net, nla_get_u32(attr));
			if (!dev) {
				ret = -ENODEV;
				goto out;
			}
			break;
		case RTA_MULTIPATH:
			if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
				ret = -EINVAL;
				goto out;
			}
			break;
		case RTA_PREFSRC:
			ret = 1;
			break;
		case RTA_TABLE:
			tblid = nla_get_u32(attr);
			break;
		}
	}
	mrt = ipmr_get_table(net, tblid);
	if (!mrt) {
		ret = -ENOENT;
		goto out;
	}
	*mrtret = mrt;
	*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
	if (dev)
		mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);

out:
	return ret;
}
/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	int ret, mrtsock, parent;
	struct mr_table *tbl;
	struct mfcctl mfcc;

	mrtsock = 0;
	tbl = NULL;
	ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
	if (ret < 0)
		return ret;

	parent = ret ? mfcc.mfcc_parent : -1;
	if (nlh->nlmsg_type == RTM_NEWROUTE)
		return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
	else
		return ipmr_mfc_delete(tbl, &mfcc, parent);
}
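/* Hypothetical sketch (compiled out): the rtmsg header fields that
 * rtm_to_ipmr_mfcc() insists on before translating RTM_NEWROUTE /
 * RTM_DELROUTE into a struct mfcctl - the netlink counterpart of the
 * MRT_ADD_MFC / MRT_DEL_MFC setsockopt path.
 */
#if 0
static void init_ipmr_rtmsg(struct rtmsg *rtm, int from_daemon)
{
	memset(rtm, 0, sizeof(*rtm));
	rtm->rtm_family = RTNL_FAMILY_IPMR;	/* required */
	rtm->rtm_dst_len = 32;			/* required */
	rtm->rtm_type = RTN_MULTICAST;		/* required */
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;	/* required */
	/* RTPROT_MROUTED marks the entry as installed by a daemon */
	rtm->rtm_protocol = from_daemon ? RTPROT_MROUTED : RTPROT_UNSPEC;
}
#endif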
static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
{
	u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);

	if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
	    nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
	    nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
			mrt->mroute_reg_vif_num) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
		       mrt->mroute_do_assert) ||
	    nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim))
		return false;

	return true;
}
static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
{
	struct nlattr *vif_nest;
	struct vif_device *vif;

	/* if the VIF doesn't exist just continue */
	if (!VIF_EXISTS(mrt, vifid))
		return true;

	vif = &mrt->vif_table[vifid];
	vif_nest = nla_nest_start(skb, IPMRA_VIF);
	if (!vif_nest)
		return false;
	if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
	    nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
	    nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
			      IPMRA_VIFA_PAD) ||
	    nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
			      IPMRA_VIFA_PAD) ||
	    nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
	    nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
		nla_nest_cancel(skb, vif_nest);
		return false;
	}
	nla_nest_end(skb, vif_nest);
	return true;
}
static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	unsigned int t = 0, s_t;
	unsigned int e = 0, s_e;
	struct mr_table *mrt;

	s_t = cb->args[0];
	s_e = cb->args[1];

	ipmr_for_each_table(mrt, net) {
		struct nlattr *vifs, *af;
		struct ifinfomsg *hdr;
		u32 i;

		if (t < s_t)
			goto skip_table;
		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, RTM_NEWLINK,
				sizeof(*hdr), NLM_F_MULTI);
		if (!nlh)
			break;

		hdr = nlmsg_data(nlh);
		memset(hdr, 0, sizeof(*hdr));
		hdr->ifi_family = RTNL_FAMILY_IPMR;

		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		if (!ipmr_fill_table(mrt, skb)) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}

		vifs = nla_nest_start(skb, IPMRA_TABLE_VIFS);
		if (!vifs) {
			nla_nest_end(skb, af);
			nlmsg_end(skb, nlh);
			goto out;
		}
		for (i = 0; i < mrt->maxvif; i++) {
			if (e < s_e)
				goto skip_entry;
			if (!ipmr_fill_vif(mrt, i, skb)) {
				nla_nest_end(skb, vifs);
				nla_nest_end(skb, af);
				nlmsg_end(skb, nlh);
				goto out;
			}
skip_entry:
			e++;
		}
		s_e = 0;
		e = 0;
		nla_nest_end(skb, vifs);
		nla_nest_end(skb, af);
		nlmsg_end(skb, nlh);
skip_table:
		t++;
	}

out:
	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}
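/* The dump reply built above nests as follows (sketch of the attribute
 * tree, one RTM_NEWLINK message per table):
 *
 *   RTM_NEWLINK, ifi_family = RTNL_FAMILY_IPMR
 *     IFLA_AF_SPEC
 *       IPMRA_TABLE_ID, IPMRA_TABLE_CACHE_RES_QUEUE_LEN,
 *       IPMRA_TABLE_MROUTE_REG_VIF_NUM, IPMRA_TABLE_MROUTE_DO_ASSERT,
 *       IPMRA_TABLE_MROUTE_DO_PIM
 *       IPMRA_TABLE_VIFS
 *         IPMRA_VIF
 *           IPMRA_VIFA_IFINDEX, IPMRA_VIFA_VIF_ID, IPMRA_VIFA_FLAGS,
 *           byte/packet counters, IPMRA_VIFA_LOCAL_ADDR,
 *           IPMRA_VIFA_REMOTE_ADDR
 *         IPMRA_VIF ...
 */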
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing :
 * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
 */
struct ipmr_vif_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct net *net,
					   struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}
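/* Illustrative /proc/net/ip_mr_vif output (values hypothetical; the two
 * address columns are the raw hex of the network-byte-order words):
 *
 * Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *  0 eth0          123456     789    654321      98 00000 0100000A 00000000
 */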
static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr_table *mrt;
	struct list_head *cache;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
					  struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(&mfc_unres_lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	it->mrt = mrt;
	it->cache = NULL;
	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mfc_cache *mfc = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->list.next != it->cache)
		return list_entry(mfc->list.next, struct mfc_cache, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(&mfc_unres_lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mfc_cache, list);

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}
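/* Note the lock handoff: the resolved list is walked under
 * rcu_read_lock() while the unresolved queue requires mfc_unres_lock,
 * so the iterator drops RCU the moment it crosses over and leaves the
 * spinlock held for ipmr_mfc_seq_stop() to release, exactly mirroring
 * ipmr_mfc_seq_idx(). it->cache records which side we are on.
 */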
static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct mr_table *mrt = it->mrt;

	if (it->cache == &mrt->mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == &mrt->mfc_cache_list)
		rcu_read_unlock();
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n,
						   mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
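/* Illustrative /proc/net/ip_mr_cache output (hypothetical values; each
 * trailing "vif:ttl" pair names an outgoing VIF of a resolved entry,
 * while unresolved entries print zero counters and no Oifs):
 *
 * Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 * 010000E0 0100000A 0         1234    56789        0  1:1    2:1
 */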
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};
#endif
#ifdef CONFIG_IP_PIMSM_V2
static const struct net_protocol pim_protocol = {
	.handler	= pim_rcv,
	.netns_ok	= 1,
};
#endif
static unsigned int ipmr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
}
static int ipmr_dump(struct net *net, struct notifier_block *nb)
{
	struct mr_table *mrt;
	int err;

	err = ipmr_rules_dump(net, nb);
	if (err)
		return err;

	ipmr_for_each_table(mrt, net) {
		struct vif_device *v = &mrt->vif_table[0];
		struct mfc_cache *mfc;
		int vifi;

		/* Notify on table VIF entries */
		read_lock(&mrt_lock);
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			if (!v->dev)
				continue;

			call_ipmr_vif_entry_notifier(nb, net, FIB_EVENT_VIF_ADD,
						     v, vifi, mrt->id);
		}
		read_unlock(&mrt_lock);

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
			call_ipmr_mfc_entry_notifier(nb, net,
						     FIB_EVENT_ENTRY_ADD, mfc,
						     mrt->id);
	}

	return 0;
}
static const struct fib_notifier_ops ipmr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.fib_seq_read	= ipmr_seq_read,
	.fib_dump	= ipmr_dump,
	.owner		= THIS_MODULE,
};
static int __net_init ipmr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv4.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	net->ipv4.ipmr_notifier_ops = ops;

	return 0;
}
static void __net_exit ipmr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
	net->ipv4.ipmr_notifier_ops = NULL;
}
/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_notifier_init(net);
	if (err)
		goto ipmr_notifier_fail;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto ipmr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create("ip_mr_vif", 0, net->proc_net, &ipmr_vif_fops))
		goto proc_vif_fail;
	if (!proc_create("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
ipmr_rules_fail:
	ipmr_notifier_exit(net);
ipmr_notifier_fail:
	return err;
}
static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_notifier_exit(net);
	ipmr_rules_exit(net);
}
static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, 0);

	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
		      NULL, ipmr_rtm_dumplink, 0);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}