/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
#include <linux/nospec.h>
struct mr6_table {
	struct list_head	list;
	possible_net_t		net;
	u32			id;
	struct sock		*mroute6_sk;
	struct timer_list	ipmr_expire_timer;
	struct list_head	mfc6_unres_queue;
	struct list_head	mfc6_cache_array[MFC6_LINES];
	struct mif_device	vif6_table[MAXMIFS];
	int			maxvif;
	atomic_t		cache_resolve_queue_len;
	bool			mroute_do_assert;
	bool			mroute_do_pim;
#ifdef CONFIG_IPV6_PIMSM_V2
	int			mroute_reg_vif_num;
#endif
};

struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr6_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_mrt, _idx) ((_mrt)->vif6_table[_idx].dev != NULL)

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */

static struct kmem_cache *mrt_cachep __read_mostly;
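
/* In practice that means: configuration paths (socket options, netlink and
 * device notifiers) run under rtnl_lock() and take write_lock_bh(&mrt_lock)
 * only around the statements that actually touch vif6_table and
 * mfc6_cache_array, while the forwarding fast path and the /proc readers
 * only ever take read_lock(&mrt_lock).  The unresolved queue is the one
 * exception and is always accessed under spin_lock_bh(&mfc_unres_lock).
 */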
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
			       struct mfc6_cache *c, struct rtmsg *rtm);
static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr6_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result	= &res,
		.flags	= FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr6_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}

static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}

static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr6_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (!mrt) {
		err = -ENOMEM;
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr6_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr6_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
	return net->ipv6.mrt6 ? 0 : -ENOMEM;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}
#endif
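
/* Summary of the two build variants above: with
 * CONFIG_IPV6_MROUTE_MULTIPLE_TABLES each mr6_table is selected through the
 * fib-rules machinery (ip6mr_fib_lookup() walks net->ipv6.mr6_rules_ops and
 * ip6mr_rule_action() maps a matching rule to its table); without it there
 * is exactly one table, net->ipv6.mrt6, and the helpers collapse to trivial
 * accessors around it.
 */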
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr6_table *mrt;
	unsigned int i;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return NULL;
	mrt->id = id;
	write_pnet(&mrt->net, net);

	/* Forwarding cache */
	for (i = 0; i < MFC6_LINES; i++)
		INIT_LIST_HEAD(&mrt->mfc6_cache_array[i]);

	INIT_LIST_HEAD(&mrt->mfc6_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);

#ifdef CONFIG_IPV6_PIMSM_V2
	mrt->mroute_reg_vif_num = -1;
#endif
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
	return mrt;
}
static void ip6mr_free_table(struct mr6_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mr6_table *mrt;
	struct list_head *cache;
	int ct;
};
352 static struct mfc6_cache
*ipmr_mfc_seq_idx(struct net
*net
,
353 struct ipmr_mfc_iter
*it
, loff_t pos
)
355 struct mr6_table
*mrt
= it
->mrt
;
356 struct mfc6_cache
*mfc
;
358 read_lock(&mrt_lock
);
359 for (it
->ct
= 0; it
->ct
< MFC6_LINES
; it
->ct
++) {
360 it
->cache
= &mrt
->mfc6_cache_array
[it
->ct
];
361 list_for_each_entry(mfc
, it
->cache
, list
)
365 read_unlock(&mrt_lock
);
367 spin_lock_bh(&mfc_unres_lock
);
368 it
->cache
= &mrt
->mfc6_unres_queue
;
369 list_for_each_entry(mfc
, it
->cache
, list
)
372 spin_unlock_bh(&mfc_unres_lock
);
/*
 *	The /proc interfaces to multicast routing
 *	/proc/ip6_mr_cache /proc/ip6_mr_vif
 */
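
/* Roughly: /proc/net/ip6_mr_vif lists one mif_device per line under an
 * "Interface BytesIn PktsIn BytesOut PktsOut Flags" header, and
 * /proc/net/ip6_mr_cache lists one mfc6_cache per line (group, origin,
 * input interface, packet/byte/wrong-interface counters, then the set of
 * output interfaces with their TTL thresholds); unresolved entries show
 * zero counters.  See the seq_show handlers for the exact format.
 */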
382 struct ipmr_vif_iter
{
383 struct seq_net_private p
;
384 struct mr6_table
*mrt
;
388 static struct mif_device
*ip6mr_vif_seq_idx(struct net
*net
,
389 struct ipmr_vif_iter
*iter
,
392 struct mr6_table
*mrt
= iter
->mrt
;
394 for (iter
->ct
= 0; iter
->ct
< mrt
->maxvif
; ++iter
->ct
) {
395 if (!MIF_EXISTS(mrt
, iter
->ct
))
398 return &mrt
->vif6_table
[iter
->ct
];
403 static void *ip6mr_vif_seq_start(struct seq_file
*seq
, loff_t
*pos
)
406 struct ipmr_vif_iter
*iter
= seq
->private;
407 struct net
*net
= seq_file_net(seq
);
408 struct mr6_table
*mrt
;
410 mrt
= ip6mr_get_table(net
, RT6_TABLE_DFLT
);
412 return ERR_PTR(-ENOENT
);
416 read_lock(&mrt_lock
);
417 return *pos
? ip6mr_vif_seq_idx(net
, seq
->private, *pos
- 1)
421 static void *ip6mr_vif_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
423 struct ipmr_vif_iter
*iter
= seq
->private;
424 struct net
*net
= seq_file_net(seq
);
425 struct mr6_table
*mrt
= iter
->mrt
;
428 if (v
== SEQ_START_TOKEN
)
429 return ip6mr_vif_seq_idx(net
, iter
, 0);
431 while (++iter
->ct
< mrt
->maxvif
) {
432 if (!MIF_EXISTS(mrt
, iter
->ct
))
434 return &mrt
->vif6_table
[iter
->ct
];
439 static void ip6mr_vif_seq_stop(struct seq_file
*seq
, void *v
)
442 read_unlock(&mrt_lock
);
445 static int ip6mr_vif_seq_show(struct seq_file
*seq
, void *v
)
447 struct ipmr_vif_iter
*iter
= seq
->private;
448 struct mr6_table
*mrt
= iter
->mrt
;
450 if (v
== SEQ_START_TOKEN
) {
452 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
454 const struct mif_device
*vif
= v
;
455 const char *name
= vif
->dev
? vif
->dev
->name
: "none";
458 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
459 vif
- mrt
->vif6_table
,
460 name
, vif
->bytes_in
, vif
->pkt_in
,
461 vif
->bytes_out
, vif
->pkt_out
,
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}
static const struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
488 static void *ipmr_mfc_seq_start(struct seq_file
*seq
, loff_t
*pos
)
490 struct ipmr_mfc_iter
*it
= seq
->private;
491 struct net
*net
= seq_file_net(seq
);
492 struct mr6_table
*mrt
;
494 mrt
= ip6mr_get_table(net
, RT6_TABLE_DFLT
);
496 return ERR_PTR(-ENOENT
);
500 return *pos
? ipmr_mfc_seq_idx(net
, seq
->private, *pos
- 1)
504 static void *ipmr_mfc_seq_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
506 struct mfc6_cache
*mfc
= v
;
507 struct ipmr_mfc_iter
*it
= seq
->private;
508 struct net
*net
= seq_file_net(seq
);
509 struct mr6_table
*mrt
= it
->mrt
;
513 if (v
== SEQ_START_TOKEN
)
514 return ipmr_mfc_seq_idx(net
, seq
->private, 0);
516 if (mfc
->list
.next
!= it
->cache
)
517 return list_entry(mfc
->list
.next
, struct mfc6_cache
, list
);
519 if (it
->cache
== &mrt
->mfc6_unres_queue
)
522 BUG_ON(it
->cache
!= &mrt
->mfc6_cache_array
[it
->ct
]);
524 while (++it
->ct
< MFC6_LINES
) {
525 it
->cache
= &mrt
->mfc6_cache_array
[it
->ct
];
526 if (list_empty(it
->cache
))
528 return list_first_entry(it
->cache
, struct mfc6_cache
, list
);
531 /* exhausted cache_array, show unresolved */
532 read_unlock(&mrt_lock
);
533 it
->cache
= &mrt
->mfc6_unres_queue
;
536 spin_lock_bh(&mfc_unres_lock
);
537 if (!list_empty(it
->cache
))
538 return list_first_entry(it
->cache
, struct mfc6_cache
, list
);
541 spin_unlock_bh(&mfc_unres_lock
);
547 static void ipmr_mfc_seq_stop(struct seq_file
*seq
, void *v
)
549 struct ipmr_mfc_iter
*it
= seq
->private;
550 struct mr6_table
*mrt
= it
->mrt
;
552 if (it
->cache
== &mrt
->mfc6_unres_queue
)
553 spin_unlock_bh(&mfc_unres_lock
);
554 else if (it
->cache
== &mrt
->mfc6_cache_array
[it
->ct
])
555 read_unlock(&mrt_lock
);
558 static int ipmr_mfc_seq_show(struct seq_file
*seq
, void *v
)
562 if (v
== SEQ_START_TOKEN
) {
566 "Iif Pkts Bytes Wrong Oifs\n");
568 const struct mfc6_cache
*mfc
= v
;
569 const struct ipmr_mfc_iter
*it
= seq
->private;
570 struct mr6_table
*mrt
= it
->mrt
;
572 seq_printf(seq
, "%pI6 %pI6 %-3hd",
573 &mfc
->mf6c_mcastgrp
, &mfc
->mf6c_origin
,
576 if (it
->cache
!= &mrt
->mfc6_unres_queue
) {
577 seq_printf(seq
, " %8lu %8lu %8lu",
579 mfc
->mfc_un
.res
.bytes
,
580 mfc
->mfc_un
.res
.wrong_if
);
581 for (n
= mfc
->mfc_un
.res
.minvif
;
582 n
< mfc
->mfc_un
.res
.maxvif
; n
++) {
583 if (MIF_EXISTS(mrt
, n
) &&
584 mfc
->mfc_un
.res
.ttls
[n
] < 255)
587 n
, mfc
->mfc_un
.res
.ttls
[n
]);
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif	/* CONFIG_PROC_FS */
622 #ifdef CONFIG_IPV6_PIMSM_V2
624 static int pim6_rcv(struct sk_buff
*skb
)
626 struct pimreghdr
*pim
;
627 struct ipv6hdr
*encap
;
628 struct net_device
*reg_dev
= NULL
;
629 struct net
*net
= dev_net(skb
->dev
);
630 struct mr6_table
*mrt
;
631 struct flowi6 fl6
= {
632 .flowi6_iif
= skb
->dev
->ifindex
,
633 .flowi6_mark
= skb
->mark
,
637 if (!pskb_may_pull(skb
, sizeof(*pim
) + sizeof(*encap
)))
640 pim
= (struct pimreghdr
*)skb_transport_header(skb
);
641 if (pim
->type
!= ((PIM_VERSION
<< 4) | PIM_TYPE_REGISTER
) ||
642 (pim
->flags
& PIM_NULL_REGISTER
) ||
643 (csum_ipv6_magic(&ipv6_hdr(skb
)->saddr
, &ipv6_hdr(skb
)->daddr
,
644 sizeof(*pim
), IPPROTO_PIM
,
645 csum_partial((void *)pim
, sizeof(*pim
), 0)) &&
646 csum_fold(skb_checksum(skb
, 0, skb
->len
, 0))))
649 /* check if the inner packet is destined to mcast group */
650 encap
= (struct ipv6hdr
*)(skb_transport_header(skb
) +
653 if (!ipv6_addr_is_multicast(&encap
->daddr
) ||
654 encap
->payload_len
== 0 ||
655 ntohs(encap
->payload_len
) + sizeof(*pim
) > skb
->len
)
658 if (ip6mr_fib_lookup(net
, &fl6
, &mrt
) < 0)
660 reg_vif_num
= mrt
->mroute_reg_vif_num
;
662 read_lock(&mrt_lock
);
663 if (reg_vif_num
>= 0)
664 reg_dev
= mrt
->vif6_table
[reg_vif_num
].dev
;
667 read_unlock(&mrt_lock
);
672 skb
->mac_header
= skb
->network_header
;
673 skb_pull(skb
, (u8
*)encap
- skb
->data
);
674 skb_reset_network_header(skb
);
675 skb
->protocol
= htons(ETH_P_IPV6
);
676 skb
->ip_summed
= CHECKSUM_NONE
;
678 skb_tunnel_rx(skb
, reg_dev
, dev_net(reg_dev
));
static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */
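
/* The "pim6reg" device created below never transmits on the wire: anything
 * the stack hands to reg_vif_xmit() is bounced straight back to the
 * userspace daemon as an MRT6MSG_WHOLEPKT cache report, while pim6_rcv()
 * above performs the reverse operation, unwrapping PIM register messages
 * and re-injecting the inner multicast packet through the register vif.
 */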
695 static netdev_tx_t
reg_vif_xmit(struct sk_buff
*skb
,
696 struct net_device
*dev
)
698 struct net
*net
= dev_net(dev
);
699 struct mr6_table
*mrt
;
700 struct flowi6 fl6
= {
701 .flowi6_oif
= dev
->ifindex
,
702 .flowi6_iif
= skb
->skb_iif
? : LOOPBACK_IFINDEX
,
703 .flowi6_mark
= skb
->mark
,
707 err
= ip6mr_fib_lookup(net
, &fl6
, &mrt
);
713 read_lock(&mrt_lock
);
714 dev
->stats
.tx_bytes
+= skb
->len
;
715 dev
->stats
.tx_packets
++;
716 ip6mr_cache_report(mrt
, skb
, mrt
->mroute_reg_vif_num
, MRT6MSG_WHOLEPKT
);
717 read_unlock(&mrt_lock
);
722 static int reg_vif_get_iflink(const struct net_device
*dev
)
static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
742 static struct net_device
*ip6mr_reg_vif(struct net
*net
, struct mr6_table
*mrt
)
744 struct net_device
*dev
;
747 if (mrt
->id
== RT6_TABLE_DFLT
)
748 sprintf(name
, "pim6reg");
750 sprintf(name
, "pim6reg%u", mrt
->id
);
752 dev
= alloc_netdev(0, name
, NET_NAME_UNKNOWN
, reg_vif_setup
);
756 dev_net_set(dev
, net
);
758 if (register_netdevice(dev
)) {
770 unregister_netdevice(dev
);
779 static int mif6_delete(struct mr6_table
*mrt
, int vifi
, int notify
,
780 struct list_head
*head
)
782 struct mif_device
*v
;
783 struct net_device
*dev
;
784 struct inet6_dev
*in6_dev
;
786 if (vifi
< 0 || vifi
>= mrt
->maxvif
)
787 return -EADDRNOTAVAIL
;
789 v
= &mrt
->vif6_table
[vifi
];
791 write_lock_bh(&mrt_lock
);
796 write_unlock_bh(&mrt_lock
);
797 return -EADDRNOTAVAIL
;
800 #ifdef CONFIG_IPV6_PIMSM_V2
801 if (vifi
== mrt
->mroute_reg_vif_num
)
802 mrt
->mroute_reg_vif_num
= -1;
805 if (vifi
+ 1 == mrt
->maxvif
) {
807 for (tmp
= vifi
- 1; tmp
>= 0; tmp
--) {
808 if (MIF_EXISTS(mrt
, tmp
))
811 mrt
->maxvif
= tmp
+ 1;
814 write_unlock_bh(&mrt_lock
);
816 dev_set_allmulti(dev
, -1);
818 in6_dev
= __in6_dev_get(dev
);
820 in6_dev
->cnf
.mc_forwarding
--;
821 inet6_netconf_notify_devconf(dev_net(dev
), RTM_NEWNETCONF
,
822 NETCONFA_MC_FORWARDING
,
823 dev
->ifindex
, &in6_dev
->cnf
);
826 if ((v
->flags
& MIFF_REGISTER
) && !notify
)
827 unregister_netdevice_queue(dev
, head
);
833 static inline void ip6mr_cache_free(struct mfc6_cache
*c
)
835 kmem_cache_free(mrt_cachep
, c
);
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
842 static void ip6mr_destroy_unres(struct mr6_table
*mrt
, struct mfc6_cache
*c
)
844 struct net
*net
= read_pnet(&mrt
->net
);
847 atomic_dec(&mrt
->cache_resolve_queue_len
);
849 while ((skb
= skb_dequeue(&c
->mfc_un
.unres
.unresolved
)) != NULL
) {
850 if (ipv6_hdr(skb
)->version
== 0) {
851 struct nlmsghdr
*nlh
= skb_pull(skb
,
852 sizeof(struct ipv6hdr
));
853 nlh
->nlmsg_type
= NLMSG_ERROR
;
854 nlh
->nlmsg_len
= nlmsg_msg_size(sizeof(struct nlmsgerr
));
855 skb_trim(skb
, nlh
->nlmsg_len
);
856 ((struct nlmsgerr
*)nlmsg_data(nlh
))->error
= -ETIMEDOUT
;
857 rtnl_unicast(skb
, net
, NETLINK_CB(skb
).portid
);
/* Timer process for all the unresolved queue. */
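
/* Unresolved entries live for at most 10*HZ (see ip6mr_cache_alloc_unres());
 * the timer below is re-armed to fire at the earliest remaining expiry and
 * is only needed while mfc6_unres_queue is non-empty.
 */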
868 static void ipmr_do_expire_process(struct mr6_table
*mrt
)
870 unsigned long now
= jiffies
;
871 unsigned long expires
= 10 * HZ
;
872 struct mfc6_cache
*c
, *next
;
874 list_for_each_entry_safe(c
, next
, &mrt
->mfc6_unres_queue
, list
) {
875 if (time_after(c
->mfc_un
.unres
.expires
, now
)) {
877 unsigned long interval
= c
->mfc_un
.unres
.expires
- now
;
878 if (interval
< expires
)
884 mr6_netlink_event(mrt
, c
, RTM_DELROUTE
);
885 ip6mr_destroy_unres(mrt
, c
);
888 if (!list_empty(&mrt
->mfc6_unres_queue
))
889 mod_timer(&mrt
->ipmr_expire_timer
, jiffies
+ expires
);
892 static void ipmr_expire_process(struct timer_list
*t
)
894 struct mr6_table
*mrt
= from_timer(mrt
, t
, ipmr_expire_timer
);
896 if (!spin_trylock(&mfc_unres_lock
)) {
897 mod_timer(&mrt
->ipmr_expire_timer
, jiffies
+ 1);
901 if (!list_empty(&mrt
->mfc6_unres_queue
))
902 ipmr_do_expire_process(mrt
);
904 spin_unlock(&mfc_unres_lock
);
/* Fill oifs list. It is called under write locked mrt_lock. */
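
/* A ttl value of 255 in mfc_un.res.ttls[] means "never forward to this
 * mif"; minvif/maxvif simply bound the range the forwarding loop has to
 * scan, so an entry with no outgoing interfaces ends up with
 * minvif == MAXMIFS and maxvif == 0.
 */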
909 static void ip6mr_update_thresholds(struct mr6_table
*mrt
, struct mfc6_cache
*cache
,
914 cache
->mfc_un
.res
.minvif
= MAXMIFS
;
915 cache
->mfc_un
.res
.maxvif
= 0;
916 memset(cache
->mfc_un
.res
.ttls
, 255, MAXMIFS
);
918 for (vifi
= 0; vifi
< mrt
->maxvif
; vifi
++) {
919 if (MIF_EXISTS(mrt
, vifi
) &&
920 ttls
[vifi
] && ttls
[vifi
] < 255) {
921 cache
->mfc_un
.res
.ttls
[vifi
] = ttls
[vifi
];
922 if (cache
->mfc_un
.res
.minvif
> vifi
)
923 cache
->mfc_un
.res
.minvif
= vifi
;
924 if (cache
->mfc_un
.res
.maxvif
<= vifi
)
925 cache
->mfc_un
.res
.maxvif
= vifi
+ 1;
928 cache
->mfc_un
.res
.lastuse
= jiffies
;
931 static int mif6_add(struct net
*net
, struct mr6_table
*mrt
,
932 struct mif6ctl
*vifc
, int mrtsock
)
934 int vifi
= vifc
->mif6c_mifi
;
935 struct mif_device
*v
= &mrt
->vif6_table
[vifi
];
936 struct net_device
*dev
;
937 struct inet6_dev
*in6_dev
;
941 if (MIF_EXISTS(mrt
, vifi
))
944 switch (vifc
->mif6c_flags
) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
951 if (mrt
->mroute_reg_vif_num
>= 0)
953 dev
= ip6mr_reg_vif(net
, mrt
);
956 err
= dev_set_allmulti(dev
, 1);
958 unregister_netdevice(dev
);
965 dev
= dev_get_by_index(net
, vifc
->mif6c_pifi
);
967 return -EADDRNOTAVAIL
;
968 err
= dev_set_allmulti(dev
, 1);
978 in6_dev
= __in6_dev_get(dev
);
980 in6_dev
->cnf
.mc_forwarding
++;
981 inet6_netconf_notify_devconf(dev_net(dev
), RTM_NEWNETCONF
,
982 NETCONFA_MC_FORWARDING
,
983 dev
->ifindex
, &in6_dev
->cnf
);
987 * Fill in the VIF structures
989 v
->rate_limit
= vifc
->vifc_rate_limit
;
990 v
->flags
= vifc
->mif6c_flags
;
992 v
->flags
|= VIFF_STATIC
;
993 v
->threshold
= vifc
->vifc_threshold
;
998 v
->link
= dev
->ifindex
;
999 if (v
->flags
& MIFF_REGISTER
)
1000 v
->link
= dev_get_iflink(dev
);
1002 /* And finish update writing critical data */
1003 write_lock_bh(&mrt_lock
);
1005 #ifdef CONFIG_IPV6_PIMSM_V2
1006 if (v
->flags
& MIFF_REGISTER
)
1007 mrt
->mroute_reg_vif_num
= vifi
;
1009 if (vifi
+ 1 > mrt
->maxvif
)
1010 mrt
->maxvif
= vifi
+ 1;
1011 write_unlock_bh(&mrt_lock
);
1015 static struct mfc6_cache
*ip6mr_cache_find(struct mr6_table
*mrt
,
1016 const struct in6_addr
*origin
,
1017 const struct in6_addr
*mcastgrp
)
1019 int line
= MFC6_HASH(mcastgrp
, origin
);
1020 struct mfc6_cache
*c
;
1022 list_for_each_entry(c
, &mrt
->mfc6_cache_array
[line
], list
) {
1023 if (ipv6_addr_equal(&c
->mf6c_origin
, origin
) &&
1024 ipv6_addr_equal(&c
->mf6c_mcastgrp
, mcastgrp
))
1030 /* Look for a (*,*,oif) entry */
1031 static struct mfc6_cache
*ip6mr_cache_find_any_parent(struct mr6_table
*mrt
,
1034 int line
= MFC6_HASH(&in6addr_any
, &in6addr_any
);
1035 struct mfc6_cache
*c
;
1037 list_for_each_entry(c
, &mrt
->mfc6_cache_array
[line
], list
)
1038 if (ipv6_addr_any(&c
->mf6c_origin
) &&
1039 ipv6_addr_any(&c
->mf6c_mcastgrp
) &&
1040 (c
->mfc_un
.res
.ttls
[mifi
] < 255))
1046 /* Look for a (*,G) entry */
1047 static struct mfc6_cache
*ip6mr_cache_find_any(struct mr6_table
*mrt
,
1048 struct in6_addr
*mcastgrp
,
1051 int line
= MFC6_HASH(mcastgrp
, &in6addr_any
);
1052 struct mfc6_cache
*c
, *proxy
;
1054 if (ipv6_addr_any(mcastgrp
))
1057 list_for_each_entry(c
, &mrt
->mfc6_cache_array
[line
], list
)
1058 if (ipv6_addr_any(&c
->mf6c_origin
) &&
1059 ipv6_addr_equal(&c
->mf6c_mcastgrp
, mcastgrp
)) {
1060 if (c
->mfc_un
.res
.ttls
[mifi
] < 255)
1063 /* It's ok if the mifi is part of the static tree */
1064 proxy
= ip6mr_cache_find_any_parent(mrt
,
1066 if (proxy
&& proxy
->mfc_un
.res
.ttls
[mifi
] < 255)
1071 return ip6mr_cache_find_any_parent(mrt
, mifi
);
/*
 *	Allocate a multicast cache entry
 */
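
/* Two allocation flavours: resolved entries are created from process
 * context (GFP_KERNEL) on behalf of the daemon, while unresolved entries
 * are created from the packet path (GFP_ATOMIC) and start a 10*HZ expiry
 * clock.
 */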
1077 static struct mfc6_cache
*ip6mr_cache_alloc(void)
1079 struct mfc6_cache
*c
= kmem_cache_zalloc(mrt_cachep
, GFP_KERNEL
);
1082 c
->mfc_un
.res
.last_assert
= jiffies
- MFC_ASSERT_THRESH
- 1;
1083 c
->mfc_un
.res
.minvif
= MAXMIFS
;
1087 static struct mfc6_cache
*ip6mr_cache_alloc_unres(void)
1089 struct mfc6_cache
*c
= kmem_cache_zalloc(mrt_cachep
, GFP_ATOMIC
);
1092 skb_queue_head_init(&c
->mfc_un
.unres
.unresolved
);
1093 c
->mfc_un
.unres
.expires
= jiffies
+ 10 * HZ
;
/*
 *	A cache entry has gone into a resolved state from queued
 */
1101 static void ip6mr_cache_resolve(struct net
*net
, struct mr6_table
*mrt
,
1102 struct mfc6_cache
*uc
, struct mfc6_cache
*c
)
1104 struct sk_buff
*skb
;
1107 * Play the pending entries through our router
1110 while ((skb
= __skb_dequeue(&uc
->mfc_un
.unres
.unresolved
))) {
1111 if (ipv6_hdr(skb
)->version
== 0) {
1112 struct nlmsghdr
*nlh
= skb_pull(skb
,
1113 sizeof(struct ipv6hdr
));
1115 if (__ip6mr_fill_mroute(mrt
, skb
, c
, nlmsg_data(nlh
)) > 0) {
1116 nlh
->nlmsg_len
= skb_tail_pointer(skb
) - (u8
*)nlh
;
1118 nlh
->nlmsg_type
= NLMSG_ERROR
;
1119 nlh
->nlmsg_len
= nlmsg_msg_size(sizeof(struct nlmsgerr
));
1120 skb_trim(skb
, nlh
->nlmsg_len
);
1121 ((struct nlmsgerr
*)nlmsg_data(nlh
))->error
= -EMSGSIZE
;
1123 rtnl_unicast(skb
, net
, NETLINK_CB(skb
).portid
);
1125 ip6_mr_forward(net
, mrt
, skb
, c
);
/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */
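
/* The report delivered to the daemon is an skb whose transport header is a
 * struct mrt6msg: im6_msgtype is MRT6MSG_NOCACHE/MRT6MSG_WRONGMIF (or
 * MRT6MSG_WHOLEPKT for PIM register vifs), im6_mif identifies the incoming
 * mif, and im6_src/im6_dst carry the offending source and group.  A copy is
 * also broadcast to netlink listeners via mrt6msg_netlink_event().
 */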
1135 static int ip6mr_cache_report(struct mr6_table
*mrt
, struct sk_buff
*pkt
,
1136 mifi_t mifi
, int assert)
1138 struct sk_buff
*skb
;
1139 struct mrt6msg
*msg
;
1142 #ifdef CONFIG_IPV6_PIMSM_V2
1143 if (assert == MRT6MSG_WHOLEPKT
)
1144 skb
= skb_realloc_headroom(pkt
, -skb_network_offset(pkt
)
1148 skb
= alloc_skb(sizeof(struct ipv6hdr
) + sizeof(*msg
), GFP_ATOMIC
);
1153 /* I suppose that internal messages
1154 * do not require checksums */
1156 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1158 #ifdef CONFIG_IPV6_PIMSM_V2
1159 if (assert == MRT6MSG_WHOLEPKT
) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
1165 skb_push(skb
, -skb_network_offset(pkt
));
1167 skb_push(skb
, sizeof(*msg
));
1168 skb_reset_transport_header(skb
);
1169 msg
= (struct mrt6msg
*)skb_transport_header(skb
);
1171 msg
->im6_msgtype
= MRT6MSG_WHOLEPKT
;
1172 msg
->im6_mif
= mrt
->mroute_reg_vif_num
;
1174 msg
->im6_src
= ipv6_hdr(pkt
)->saddr
;
1175 msg
->im6_dst
= ipv6_hdr(pkt
)->daddr
;
1177 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1182 * Copy the IP header
1185 skb_put(skb
, sizeof(struct ipv6hdr
));
1186 skb_reset_network_header(skb
);
1187 skb_copy_to_linear_data(skb
, ipv6_hdr(pkt
), sizeof(struct ipv6hdr
));
1192 skb_put(skb
, sizeof(*msg
));
1193 skb_reset_transport_header(skb
);
1194 msg
= (struct mrt6msg
*)skb_transport_header(skb
);
1197 msg
->im6_msgtype
= assert;
1198 msg
->im6_mif
= mifi
;
1200 msg
->im6_src
= ipv6_hdr(pkt
)->saddr
;
1201 msg
->im6_dst
= ipv6_hdr(pkt
)->daddr
;
1203 skb_dst_set(skb
, dst_clone(skb_dst(pkt
)));
1204 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
1207 if (!mrt
->mroute6_sk
) {
1212 mrt6msg_netlink_event(mrt
, skb
);
1215 * Deliver to user space multicast routing algorithms
1217 ret
= sock_queue_rcv_skb(mrt
->mroute6_sk
, skb
);
1219 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */
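
/* Resource limits below: at most 10 unresolved cache entries per table
 * (cache_resolve_queue_len) and only a handful of queued skbs per entry
 * (drops once unresolved.qlen exceeds 3), so a misbehaving or absent daemon
 * cannot pin unbounded amounts of packet memory.
 */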
1231 ip6mr_cache_unresolved(struct mr6_table
*mrt
, mifi_t mifi
, struct sk_buff
*skb
)
1235 struct mfc6_cache
*c
;
1237 spin_lock_bh(&mfc_unres_lock
);
1238 list_for_each_entry(c
, &mrt
->mfc6_unres_queue
, list
) {
1239 if (ipv6_addr_equal(&c
->mf6c_mcastgrp
, &ipv6_hdr(skb
)->daddr
) &&
1240 ipv6_addr_equal(&c
->mf6c_origin
, &ipv6_hdr(skb
)->saddr
)) {
1248 * Create a new entry if allowable
1251 if (atomic_read(&mrt
->cache_resolve_queue_len
) >= 10 ||
1252 (c
= ip6mr_cache_alloc_unres()) == NULL
) {
1253 spin_unlock_bh(&mfc_unres_lock
);
1260 * Fill in the new cache entry
1262 c
->mf6c_parent
= -1;
1263 c
->mf6c_origin
= ipv6_hdr(skb
)->saddr
;
1264 c
->mf6c_mcastgrp
= ipv6_hdr(skb
)->daddr
;
1267 * Reflect first query at pim6sd
1269 err
= ip6mr_cache_report(mrt
, skb
, mifi
, MRT6MSG_NOCACHE
);
		/* If the report failed throw the cache entry
		   out - Brad Parker
		 */
1274 spin_unlock_bh(&mfc_unres_lock
);
1276 ip6mr_cache_free(c
);
1281 atomic_inc(&mrt
->cache_resolve_queue_len
);
1282 list_add(&c
->list
, &mrt
->mfc6_unres_queue
);
1283 mr6_netlink_event(mrt
, c
, RTM_NEWROUTE
);
1285 ipmr_do_expire_process(mrt
);
1289 * See if we can append the packet
1291 if (c
->mfc_un
.unres
.unresolved
.qlen
> 3) {
1295 skb_queue_tail(&c
->mfc_un
.unres
.unresolved
, skb
);
1299 spin_unlock_bh(&mfc_unres_lock
);
1304 * MFC6 cache manipulation by user space
1307 static int ip6mr_mfc_delete(struct mr6_table
*mrt
, struct mf6cctl
*mfc
,
1311 struct mfc6_cache
*c
, *next
;
1313 line
= MFC6_HASH(&mfc
->mf6cc_mcastgrp
.sin6_addr
, &mfc
->mf6cc_origin
.sin6_addr
);
1315 list_for_each_entry_safe(c
, next
, &mrt
->mfc6_cache_array
[line
], list
) {
1316 if (ipv6_addr_equal(&c
->mf6c_origin
, &mfc
->mf6cc_origin
.sin6_addr
) &&
1317 ipv6_addr_equal(&c
->mf6c_mcastgrp
,
1318 &mfc
->mf6cc_mcastgrp
.sin6_addr
) &&
1319 (parent
== -1 || parent
== c
->mf6c_parent
)) {
1320 write_lock_bh(&mrt_lock
);
1322 write_unlock_bh(&mrt_lock
);
1324 mr6_netlink_event(mrt
, c
, RTM_DELROUTE
);
1325 ip6mr_cache_free(c
);
1332 static int ip6mr_device_event(struct notifier_block
*this,
1333 unsigned long event
, void *ptr
)
1335 struct net_device
*dev
= netdev_notifier_info_to_dev(ptr
);
1336 struct net
*net
= dev_net(dev
);
1337 struct mr6_table
*mrt
;
1338 struct mif_device
*v
;
1341 if (event
!= NETDEV_UNREGISTER
)
1344 ip6mr_for_each_table(mrt
, net
) {
1345 v
= &mrt
->vif6_table
[0];
1346 for (ct
= 0; ct
< mrt
->maxvif
; ct
++, v
++) {
1348 mif6_delete(mrt
, ct
, 1, NULL
);
1355 static struct notifier_block ip6_mr_notifier
= {
1356 .notifier_call
= ip6mr_device_event
1360 * Setup for IP multicast routing
1363 static int __net_init
ip6mr_net_init(struct net
*net
)
1367 err
= ip6mr_rules_init(net
);
1371 #ifdef CONFIG_PROC_FS
1373 if (!proc_create("ip6_mr_vif", 0, net
->proc_net
, &ip6mr_vif_fops
))
1375 if (!proc_create("ip6_mr_cache", 0, net
->proc_net
, &ip6mr_mfc_fops
))
1376 goto proc_cache_fail
;
1381 #ifdef CONFIG_PROC_FS
1383 remove_proc_entry("ip6_mr_vif", net
->proc_net
);
1385 ip6mr_rules_exit(net
);
1391 static void __net_exit
ip6mr_net_exit(struct net
*net
)
1393 #ifdef CONFIG_PROC_FS
1394 remove_proc_entry("ip6_mr_cache", net
->proc_net
);
1395 remove_proc_entry("ip6_mr_vif", net
->proc_net
);
1397 ip6mr_rules_exit(net
);
static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
1405 int __init
ip6_mr_init(void)
1409 mrt_cachep
= kmem_cache_create("ip6_mrt_cache",
1410 sizeof(struct mfc6_cache
),
1411 0, SLAB_HWCACHE_ALIGN
,
1416 err
= register_pernet_subsys(&ip6mr_net_ops
);
1418 goto reg_pernet_fail
;
1420 err
= register_netdevice_notifier(&ip6_mr_notifier
);
1422 goto reg_notif_fail
;
1423 #ifdef CONFIG_IPV6_PIMSM_V2
1424 if (inet6_add_protocol(&pim6_protocol
, IPPROTO_PIM
) < 0) {
1425 pr_err("%s: can't add PIM protocol\n", __func__
);
1427 goto add_proto_fail
;
1430 rtnl_register(RTNL_FAMILY_IP6MR
, RTM_GETROUTE
, NULL
,
1431 ip6mr_rtm_dumproute
, 0);
1433 #ifdef CONFIG_IPV6_PIMSM_V2
1435 unregister_netdevice_notifier(&ip6_mr_notifier
);
1438 unregister_pernet_subsys(&ip6mr_net_ops
);
1440 kmem_cache_destroy(mrt_cachep
);
1444 void ip6_mr_cleanup(void)
1446 rtnl_unregister(RTNL_FAMILY_IP6MR
, RTM_GETROUTE
);
1447 #ifdef CONFIG_IPV6_PIMSM_V2
1448 inet6_del_protocol(&pim6_protocol
, IPPROTO_PIM
);
1450 unregister_netdevice_notifier(&ip6_mr_notifier
);
1451 unregister_pernet_subsys(&ip6mr_net_ops
);
1452 kmem_cache_destroy(mrt_cachep
);
1455 static int ip6mr_mfc_add(struct net
*net
, struct mr6_table
*mrt
,
1456 struct mf6cctl
*mfc
, int mrtsock
, int parent
)
1460 struct mfc6_cache
*uc
, *c
;
1461 unsigned char ttls
[MAXMIFS
];
1464 if (mfc
->mf6cc_parent
>= MAXMIFS
)
1467 memset(ttls
, 255, MAXMIFS
);
1468 for (i
= 0; i
< MAXMIFS
; i
++) {
1469 if (IF_ISSET(i
, &mfc
->mf6cc_ifset
))
1474 line
= MFC6_HASH(&mfc
->mf6cc_mcastgrp
.sin6_addr
, &mfc
->mf6cc_origin
.sin6_addr
);
1476 list_for_each_entry(c
, &mrt
->mfc6_cache_array
[line
], list
) {
1477 if (ipv6_addr_equal(&c
->mf6c_origin
, &mfc
->mf6cc_origin
.sin6_addr
) &&
1478 ipv6_addr_equal(&c
->mf6c_mcastgrp
,
1479 &mfc
->mf6cc_mcastgrp
.sin6_addr
) &&
1480 (parent
== -1 || parent
== mfc
->mf6cc_parent
)) {
1487 write_lock_bh(&mrt_lock
);
1488 c
->mf6c_parent
= mfc
->mf6cc_parent
;
1489 ip6mr_update_thresholds(mrt
, c
, ttls
);
1491 c
->mfc_flags
|= MFC_STATIC
;
1492 write_unlock_bh(&mrt_lock
);
1493 mr6_netlink_event(mrt
, c
, RTM_NEWROUTE
);
1497 if (!ipv6_addr_any(&mfc
->mf6cc_mcastgrp
.sin6_addr
) &&
1498 !ipv6_addr_is_multicast(&mfc
->mf6cc_mcastgrp
.sin6_addr
))
1501 c
= ip6mr_cache_alloc();
1505 c
->mf6c_origin
= mfc
->mf6cc_origin
.sin6_addr
;
1506 c
->mf6c_mcastgrp
= mfc
->mf6cc_mcastgrp
.sin6_addr
;
1507 c
->mf6c_parent
= mfc
->mf6cc_parent
;
1508 ip6mr_update_thresholds(mrt
, c
, ttls
);
1510 c
->mfc_flags
|= MFC_STATIC
;
1512 write_lock_bh(&mrt_lock
);
1513 list_add(&c
->list
, &mrt
->mfc6_cache_array
[line
]);
1514 write_unlock_bh(&mrt_lock
);
	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
1521 spin_lock_bh(&mfc_unres_lock
);
1522 list_for_each_entry(uc
, &mrt
->mfc6_unres_queue
, list
) {
1523 if (ipv6_addr_equal(&uc
->mf6c_origin
, &c
->mf6c_origin
) &&
1524 ipv6_addr_equal(&uc
->mf6c_mcastgrp
, &c
->mf6c_mcastgrp
)) {
1525 list_del(&uc
->list
);
1526 atomic_dec(&mrt
->cache_resolve_queue_len
);
1531 if (list_empty(&mrt
->mfc6_unres_queue
))
1532 del_timer(&mrt
->ipmr_expire_timer
);
1533 spin_unlock_bh(&mfc_unres_lock
);
1536 ip6mr_cache_resolve(net
, mrt
, uc
, c
);
1537 ip6mr_cache_free(uc
);
1539 mr6_netlink_event(mrt
, c
, RTM_NEWROUTE
);
1544 * Close the multicast socket, and clear the vif tables etc
1547 static void mroute_clean_tables(struct mr6_table
*mrt
, bool all
)
1551 struct mfc6_cache
*c
, *next
;
1554 * Shut down all active vif entries
1556 for (i
= 0; i
< mrt
->maxvif
; i
++) {
1557 if (!all
&& (mrt
->vif6_table
[i
].flags
& VIFF_STATIC
))
1559 mif6_delete(mrt
, i
, 0, &list
);
1561 unregister_netdevice_many(&list
);
1566 for (i
= 0; i
< MFC6_LINES
; i
++) {
1567 list_for_each_entry_safe(c
, next
, &mrt
->mfc6_cache_array
[i
], list
) {
1568 if (!all
&& (c
->mfc_flags
& MFC_STATIC
))
1570 write_lock_bh(&mrt_lock
);
1572 write_unlock_bh(&mrt_lock
);
1574 mr6_netlink_event(mrt
, c
, RTM_DELROUTE
);
1575 ip6mr_cache_free(c
);
1579 if (atomic_read(&mrt
->cache_resolve_queue_len
) != 0) {
1580 spin_lock_bh(&mfc_unres_lock
);
1581 list_for_each_entry_safe(c
, next
, &mrt
->mfc6_unres_queue
, list
) {
1583 mr6_netlink_event(mrt
, c
, RTM_DELROUTE
);
1584 ip6mr_destroy_unres(mrt
, c
);
1586 spin_unlock_bh(&mfc_unres_lock
);
1590 static int ip6mr_sk_init(struct mr6_table
*mrt
, struct sock
*sk
)
1593 struct net
*net
= sock_net(sk
);
1596 write_lock_bh(&mrt_lock
);
1597 if (likely(mrt
->mroute6_sk
== NULL
)) {
1598 mrt
->mroute6_sk
= sk
;
1599 net
->ipv6
.devconf_all
->mc_forwarding
++;
1603 write_unlock_bh(&mrt_lock
);
1606 inet6_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
1607 NETCONFA_MC_FORWARDING
,
1608 NETCONFA_IFINDEX_ALL
,
1609 net
->ipv6
.devconf_all
);
1615 int ip6mr_sk_done(struct sock
*sk
)
1618 struct net
*net
= sock_net(sk
);
1619 struct mr6_table
*mrt
;
1621 if (sk
->sk_type
!= SOCK_RAW
||
1622 inet_sk(sk
)->inet_num
!= IPPROTO_ICMPV6
)
1626 ip6mr_for_each_table(mrt
, net
) {
1627 if (sk
== mrt
->mroute6_sk
) {
1628 write_lock_bh(&mrt_lock
);
1629 mrt
->mroute6_sk
= NULL
;
1630 net
->ipv6
.devconf_all
->mc_forwarding
--;
1631 write_unlock_bh(&mrt_lock
);
1632 inet6_netconf_notify_devconf(net
, RTM_NEWNETCONF
,
1633 NETCONFA_MC_FORWARDING
,
1634 NETCONFA_IFINDEX_ALL
,
1635 net
->ipv6
.devconf_all
);
1637 mroute_clean_tables(mrt
, false);
1647 struct sock
*mroute6_socket(struct net
*net
, struct sk_buff
*skb
)
1649 struct mr6_table
*mrt
;
1650 struct flowi6 fl6
= {
1651 .flowi6_iif
= skb
->skb_iif
? : LOOPBACK_IFINDEX
,
1652 .flowi6_oif
= skb
->dev
->ifindex
,
1653 .flowi6_mark
= skb
->mark
,
1656 if (ip6mr_fib_lookup(net
, &fl6
, &mrt
) < 0)
1659 return mrt
->mroute6_sk
;
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */
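
/* Illustrative sketch (not part of this file) of how a daemon such as
 * pim6sd typically drives this interface, assuming an ICMPv6 raw socket:
 *
 *	int one = 1;
 *	struct mif6ctl mc = { .mif6c_mifi = 0, .mif6c_pifi = ifindex };
 *	struct mf6cctl fc = { ... origin/group/parent/oif set ... };
 *
 *	s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT,    &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc,  sizeof(mc));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &fc,  sizeof(fc));
 *	...
 *	setsockopt(s, IPPROTO_IPV6, MRT6_DONE,    &one, sizeof(one));
 */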
1669 int ip6_mroute_setsockopt(struct sock
*sk
, int optname
, char __user
*optval
, unsigned int optlen
)
1671 int ret
, parent
= 0;
1675 struct net
*net
= sock_net(sk
);
1676 struct mr6_table
*mrt
;
1678 if (sk
->sk_type
!= SOCK_RAW
||
1679 inet_sk(sk
)->inet_num
!= IPPROTO_ICMPV6
)
1682 mrt
= ip6mr_get_table(net
, raw6_sk(sk
)->ip6mr_table
? : RT6_TABLE_DFLT
);
1686 if (optname
!= MRT6_INIT
) {
1687 if (sk
!= mrt
->mroute6_sk
&& !ns_capable(net
->user_ns
, CAP_NET_ADMIN
))
1693 if (optlen
< sizeof(int))
1696 return ip6mr_sk_init(mrt
, sk
);
1699 return ip6mr_sk_done(sk
);
1702 if (optlen
< sizeof(vif
))
1704 if (copy_from_user(&vif
, optval
, sizeof(vif
)))
1706 if (vif
.mif6c_mifi
>= MAXMIFS
)
1709 ret
= mif6_add(net
, mrt
, &vif
, sk
== mrt
->mroute6_sk
);
1714 if (optlen
< sizeof(mifi_t
))
1716 if (copy_from_user(&mifi
, optval
, sizeof(mifi_t
)))
1719 ret
= mif6_delete(mrt
, mifi
, 0, NULL
);
1724 * Manipulate the forwarding caches. These live
1725 * in a sort of kernel/user symbiosis.
1731 case MRT6_ADD_MFC_PROXY
:
1732 case MRT6_DEL_MFC_PROXY
:
1733 if (optlen
< sizeof(mfc
))
1735 if (copy_from_user(&mfc
, optval
, sizeof(mfc
)))
1738 parent
= mfc
.mf6cc_parent
;
1740 if (optname
== MRT6_DEL_MFC
|| optname
== MRT6_DEL_MFC_PROXY
)
1741 ret
= ip6mr_mfc_delete(mrt
, &mfc
, parent
);
1743 ret
= ip6mr_mfc_add(net
, mrt
, &mfc
,
1744 sk
== mrt
->mroute6_sk
, parent
);
1749 * Control PIM assert (to activate pim will activate assert)
1755 if (optlen
!= sizeof(v
))
1757 if (get_user(v
, (int __user
*)optval
))
1759 mrt
->mroute_do_assert
= v
;
1763 #ifdef CONFIG_IPV6_PIMSM_V2
1768 if (optlen
!= sizeof(v
))
1770 if (get_user(v
, (int __user
*)optval
))
1775 if (v
!= mrt
->mroute_do_pim
) {
1776 mrt
->mroute_do_pim
= v
;
1777 mrt
->mroute_do_assert
= v
;
1784 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1789 if (optlen
!= sizeof(u32
))
1791 if (get_user(v
, (u32 __user
*)optval
))
1793 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1794 if (v
!= RT_TABLE_DEFAULT
&& v
>= 100000000)
1796 if (sk
== mrt
->mroute6_sk
)
1801 if (!ip6mr_new_table(net
, v
))
1804 raw6_sk(sk
)->ip6mr_table
= v
;
1810 * Spurious command, or MRT6_VERSION which you cannot
1814 return -ENOPROTOOPT
;
1819 * Getsock opt support for the multicast routing system.
1822 int ip6_mroute_getsockopt(struct sock
*sk
, int optname
, char __user
*optval
,
1827 struct net
*net
= sock_net(sk
);
1828 struct mr6_table
*mrt
;
1830 if (sk
->sk_type
!= SOCK_RAW
||
1831 inet_sk(sk
)->inet_num
!= IPPROTO_ICMPV6
)
1834 mrt
= ip6mr_get_table(net
, raw6_sk(sk
)->ip6mr_table
? : RT6_TABLE_DFLT
);
1842 #ifdef CONFIG_IPV6_PIMSM_V2
1844 val
= mrt
->mroute_do_pim
;
1848 val
= mrt
->mroute_do_assert
;
1851 return -ENOPROTOOPT
;
1854 if (get_user(olr
, optlen
))
1857 olr
= min_t(int, olr
, sizeof(int));
1861 if (put_user(olr
, optlen
))
1863 if (copy_to_user(optval
, &val
, olr
))
/*
 *	The IP multicast ioctl support routines.
 */
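
/* Two read-only ioctls are supported on the mroute socket:
 * SIOCGETMIFCNT_IN6 copies out the packet/byte counters of one mif
 * (struct sioc_mif_req6) and SIOCGETSGCNT_IN6 the counters of one (S,G)
 * cache entry (struct sioc_sg_req6); everything else is -ENOIOCTLCMD.
 */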
1872 int ip6mr_ioctl(struct sock
*sk
, int cmd
, void __user
*arg
)
1874 struct sioc_sg_req6 sr
;
1875 struct sioc_mif_req6 vr
;
1876 struct mif_device
*vif
;
1877 struct mfc6_cache
*c
;
1878 struct net
*net
= sock_net(sk
);
1879 struct mr6_table
*mrt
;
1881 mrt
= ip6mr_get_table(net
, raw6_sk(sk
)->ip6mr_table
? : RT6_TABLE_DFLT
);
1886 case SIOCGETMIFCNT_IN6
:
1887 if (copy_from_user(&vr
, arg
, sizeof(vr
)))
1889 if (vr
.mifi
>= mrt
->maxvif
)
1891 vr
.mifi
= array_index_nospec(vr
.mifi
, mrt
->maxvif
);
1892 read_lock(&mrt_lock
);
1893 vif
= &mrt
->vif6_table
[vr
.mifi
];
1894 if (MIF_EXISTS(mrt
, vr
.mifi
)) {
1895 vr
.icount
= vif
->pkt_in
;
1896 vr
.ocount
= vif
->pkt_out
;
1897 vr
.ibytes
= vif
->bytes_in
;
1898 vr
.obytes
= vif
->bytes_out
;
1899 read_unlock(&mrt_lock
);
1901 if (copy_to_user(arg
, &vr
, sizeof(vr
)))
1905 read_unlock(&mrt_lock
);
1906 return -EADDRNOTAVAIL
;
1907 case SIOCGETSGCNT_IN6
:
1908 if (copy_from_user(&sr
, arg
, sizeof(sr
)))
1911 read_lock(&mrt_lock
);
1912 c
= ip6mr_cache_find(mrt
, &sr
.src
.sin6_addr
, &sr
.grp
.sin6_addr
);
1914 sr
.pktcnt
= c
->mfc_un
.res
.pkt
;
1915 sr
.bytecnt
= c
->mfc_un
.res
.bytes
;
1916 sr
.wrong_if
= c
->mfc_un
.res
.wrong_if
;
1917 read_unlock(&mrt_lock
);
1919 if (copy_to_user(arg
, &sr
, sizeof(sr
)))
1923 read_unlock(&mrt_lock
);
1924 return -EADDRNOTAVAIL
;
1926 return -ENOIOCTLCMD
;
1930 #ifdef CONFIG_COMPAT
1931 struct compat_sioc_sg_req6
{
1932 struct sockaddr_in6 src
;
1933 struct sockaddr_in6 grp
;
1934 compat_ulong_t pktcnt
;
1935 compat_ulong_t bytecnt
;
1936 compat_ulong_t wrong_if
;
1939 struct compat_sioc_mif_req6
{
1941 compat_ulong_t icount
;
1942 compat_ulong_t ocount
;
1943 compat_ulong_t ibytes
;
1944 compat_ulong_t obytes
;
1947 int ip6mr_compat_ioctl(struct sock
*sk
, unsigned int cmd
, void __user
*arg
)
1949 struct compat_sioc_sg_req6 sr
;
1950 struct compat_sioc_mif_req6 vr
;
1951 struct mif_device
*vif
;
1952 struct mfc6_cache
*c
;
1953 struct net
*net
= sock_net(sk
);
1954 struct mr6_table
*mrt
;
1956 mrt
= ip6mr_get_table(net
, raw6_sk(sk
)->ip6mr_table
? : RT6_TABLE_DFLT
);
1961 case SIOCGETMIFCNT_IN6
:
1962 if (copy_from_user(&vr
, arg
, sizeof(vr
)))
1964 if (vr
.mifi
>= mrt
->maxvif
)
1966 vr
.mifi
= array_index_nospec(vr
.mifi
, mrt
->maxvif
);
1967 read_lock(&mrt_lock
);
1968 vif
= &mrt
->vif6_table
[vr
.mifi
];
1969 if (MIF_EXISTS(mrt
, vr
.mifi
)) {
1970 vr
.icount
= vif
->pkt_in
;
1971 vr
.ocount
= vif
->pkt_out
;
1972 vr
.ibytes
= vif
->bytes_in
;
1973 vr
.obytes
= vif
->bytes_out
;
1974 read_unlock(&mrt_lock
);
1976 if (copy_to_user(arg
, &vr
, sizeof(vr
)))
1980 read_unlock(&mrt_lock
);
1981 return -EADDRNOTAVAIL
;
1982 case SIOCGETSGCNT_IN6
:
1983 if (copy_from_user(&sr
, arg
, sizeof(sr
)))
1986 read_lock(&mrt_lock
);
1987 c
= ip6mr_cache_find(mrt
, &sr
.src
.sin6_addr
, &sr
.grp
.sin6_addr
);
1989 sr
.pktcnt
= c
->mfc_un
.res
.pkt
;
1990 sr
.bytecnt
= c
->mfc_un
.res
.bytes
;
1991 sr
.wrong_if
= c
->mfc_un
.res
.wrong_if
;
1992 read_unlock(&mrt_lock
);
1994 if (copy_to_user(arg
, &sr
, sizeof(sr
)))
1998 read_unlock(&mrt_lock
);
1999 return -EADDRNOTAVAIL
;
2001 return -ENOIOCTLCMD
;
2006 static inline int ip6mr_forward2_finish(struct net
*net
, struct sock
*sk
, struct sk_buff
*skb
)
2008 __IP6_INC_STATS(net
, ip6_dst_idev(skb_dst(skb
)),
2009 IPSTATS_MIB_OUTFORWDATAGRAMS
);
2010 __IP6_ADD_STATS(net
, ip6_dst_idev(skb_dst(skb
)),
2011 IPSTATS_MIB_OUTOCTETS
, skb
->len
);
2012 return dst_output(net
, sk
, skb
);
/*
 *	Processing handlers for ip6mr_forward
 */
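
/* ip6mr_forward2() handles one output mif: register vifs short-circuit into
 * an MRT6MSG_WHOLEPKT report, ordinary vifs get a fresh route looked up for
 * the group, the hop limit is checked against the entry's TTL threshold by
 * the caller, and the packet finally passes through the NF_INET_FORWARD
 * netfilter hook before dst_output().
 */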
2019 static int ip6mr_forward2(struct net
*net
, struct mr6_table
*mrt
,
2020 struct sk_buff
*skb
, struct mfc6_cache
*c
, int vifi
)
2022 struct ipv6hdr
*ipv6h
;
2023 struct mif_device
*vif
= &mrt
->vif6_table
[vifi
];
2024 struct net_device
*dev
;
2025 struct dst_entry
*dst
;
2031 #ifdef CONFIG_IPV6_PIMSM_V2
2032 if (vif
->flags
& MIFF_REGISTER
) {
2034 vif
->bytes_out
+= skb
->len
;
2035 vif
->dev
->stats
.tx_bytes
+= skb
->len
;
2036 vif
->dev
->stats
.tx_packets
++;
2037 ip6mr_cache_report(mrt
, skb
, vifi
, MRT6MSG_WHOLEPKT
);
2042 ipv6h
= ipv6_hdr(skb
);
2044 fl6
= (struct flowi6
) {
2045 .flowi6_oif
= vif
->link
,
2046 .daddr
= ipv6h
->daddr
,
2049 dst
= ip6_route_output(net
, NULL
, &fl6
);
2056 skb_dst_set(skb
, dst
);
	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
2072 vif
->bytes_out
+= skb
->len
;
2074 /* We are about to write */
2075 /* XXX: extension headers? */
2076 if (skb_cow(skb
, sizeof(*ipv6h
) + LL_RESERVED_SPACE(dev
)))
2079 ipv6h
= ipv6_hdr(skb
);
2082 IP6CB(skb
)->flags
|= IP6SKB_FORWARDED
;
2084 return NF_HOOK(NFPROTO_IPV6
, NF_INET_FORWARD
,
2085 net
, NULL
, skb
, skb
->dev
, dev
,
2086 ip6mr_forward2_finish
);
2093 static int ip6mr_find_vif(struct mr6_table
*mrt
, struct net_device
*dev
)
2097 for (ct
= mrt
->maxvif
- 1; ct
>= 0; ct
--) {
2098 if (mrt
->vif6_table
[ct
].dev
== dev
)
2104 static void ip6_mr_forward(struct net
*net
, struct mr6_table
*mrt
,
2105 struct sk_buff
*skb
, struct mfc6_cache
*cache
)
2109 int true_vifi
= ip6mr_find_vif(mrt
, skb
->dev
);
2111 vif
= cache
->mf6c_parent
;
2112 cache
->mfc_un
.res
.pkt
++;
2113 cache
->mfc_un
.res
.bytes
+= skb
->len
;
2114 cache
->mfc_un
.res
.lastuse
= jiffies
;
2116 if (ipv6_addr_any(&cache
->mf6c_origin
) && true_vifi
>= 0) {
2117 struct mfc6_cache
*cache_proxy
;
2119 /* For an (*,G) entry, we only check that the incoming
2120 * interface is part of the static tree.
2122 cache_proxy
= ip6mr_cache_find_any_parent(mrt
, vif
);
2124 cache_proxy
->mfc_un
.res
.ttls
[true_vifi
] < 255)
2129 * Wrong interface: drop packet and (maybe) send PIM assert.
2131 if (mrt
->vif6_table
[vif
].dev
!= skb
->dev
) {
2132 cache
->mfc_un
.res
.wrong_if
++;
		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
2140 (mrt
->mroute_do_pim
||
2141 cache
->mfc_un
.res
.ttls
[true_vifi
] < 255) &&
2143 cache
->mfc_un
.res
.last_assert
+ MFC_ASSERT_THRESH
)) {
2144 cache
->mfc_un
.res
.last_assert
= jiffies
;
2145 ip6mr_cache_report(mrt
, skb
, true_vifi
, MRT6MSG_WRONGMIF
);
2151 mrt
->vif6_table
[vif
].pkt_in
++;
2152 mrt
->vif6_table
[vif
].bytes_in
+= skb
->len
;
2157 if (ipv6_addr_any(&cache
->mf6c_origin
) &&
2158 ipv6_addr_any(&cache
->mf6c_mcastgrp
)) {
2159 if (true_vifi
>= 0 &&
2160 true_vifi
!= cache
->mf6c_parent
&&
2161 ipv6_hdr(skb
)->hop_limit
>
2162 cache
->mfc_un
.res
.ttls
[cache
->mf6c_parent
]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = cache->mf6c_parent;
2172 for (ct
= cache
->mfc_un
.res
.maxvif
- 1; ct
>= cache
->mfc_un
.res
.minvif
; ct
--) {
2173 /* For (*,G) entry, don't forward to the incoming interface */
2174 if ((!ipv6_addr_any(&cache
->mf6c_origin
) || ct
!= true_vifi
) &&
2175 ipv6_hdr(skb
)->hop_limit
> cache
->mfc_un
.res
.ttls
[ct
]) {
2177 struct sk_buff
*skb2
= skb_clone(skb
, GFP_ATOMIC
);
2179 ip6mr_forward2(net
, mrt
, skb2
, cache
, psend
);
2186 ip6mr_forward2(net
, mrt
, skb
, cache
, psend
);
/*
 *	Multicast packets for forwarding arrive here
 */
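
/* ip6_mr_input() is handed multicast packets that the IPv6 receive path has
 * decided are candidates for forwarding: pick the table via
 * ip6mr_fib_lookup(), look the packet up in the resolved cache (falling
 * back to (*,G) and (*,*) proxy entries), then either forward it through
 * ip6_mr_forward() or park it on the unresolved queue with
 * ip6mr_cache_unresolved().
 */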
2199 int ip6_mr_input(struct sk_buff
*skb
)
2201 struct mfc6_cache
*cache
;
2202 struct net
*net
= dev_net(skb
->dev
);
2203 struct mr6_table
*mrt
;
2204 struct flowi6 fl6
= {
2205 .flowi6_iif
= skb
->dev
->ifindex
,
2206 .flowi6_mark
= skb
->mark
,
2210 err
= ip6mr_fib_lookup(net
, &fl6
, &mrt
);
2216 read_lock(&mrt_lock
);
2217 cache
= ip6mr_cache_find(mrt
,
2218 &ipv6_hdr(skb
)->saddr
, &ipv6_hdr(skb
)->daddr
);
2220 int vif
= ip6mr_find_vif(mrt
, skb
->dev
);
2223 cache
= ip6mr_cache_find_any(mrt
,
2224 &ipv6_hdr(skb
)->daddr
,
2229 * No usable cache entry
2234 vif
= ip6mr_find_vif(mrt
, skb
->dev
);
2236 int err
= ip6mr_cache_unresolved(mrt
, vif
, skb
);
2237 read_unlock(&mrt_lock
);
2241 read_unlock(&mrt_lock
);
2246 ip6_mr_forward(net
, mrt
, skb
, cache
);
2248 read_unlock(&mrt_lock
);
2254 static int __ip6mr_fill_mroute(struct mr6_table
*mrt
, struct sk_buff
*skb
,
2255 struct mfc6_cache
*c
, struct rtmsg
*rtm
)
2257 struct rta_mfc_stats mfcs
;
2258 struct nlattr
*mp_attr
;
2259 struct rtnexthop
*nhp
;
2260 unsigned long lastuse
;
2263 /* If cache is unresolved, don't try to parse IIF and OIF */
2264 if (c
->mf6c_parent
>= MAXMIFS
) {
2265 rtm
->rtm_flags
|= RTNH_F_UNRESOLVED
;
2269 if (MIF_EXISTS(mrt
, c
->mf6c_parent
) &&
2270 nla_put_u32(skb
, RTA_IIF
, mrt
->vif6_table
[c
->mf6c_parent
].dev
->ifindex
) < 0)
2272 mp_attr
= nla_nest_start(skb
, RTA_MULTIPATH
);
2276 for (ct
= c
->mfc_un
.res
.minvif
; ct
< c
->mfc_un
.res
.maxvif
; ct
++) {
2277 if (MIF_EXISTS(mrt
, ct
) && c
->mfc_un
.res
.ttls
[ct
] < 255) {
2278 nhp
= nla_reserve_nohdr(skb
, sizeof(*nhp
));
2280 nla_nest_cancel(skb
, mp_attr
);
2284 nhp
->rtnh_flags
= 0;
2285 nhp
->rtnh_hops
= c
->mfc_un
.res
.ttls
[ct
];
2286 nhp
->rtnh_ifindex
= mrt
->vif6_table
[ct
].dev
->ifindex
;
2287 nhp
->rtnh_len
= sizeof(*nhp
);
2291 nla_nest_end(skb
, mp_attr
);
2293 lastuse
= READ_ONCE(c
->mfc_un
.res
.lastuse
);
2294 lastuse
= time_after_eq(jiffies
, lastuse
) ? jiffies
- lastuse
: 0;
2296 mfcs
.mfcs_packets
= c
->mfc_un
.res
.pkt
;
2297 mfcs
.mfcs_bytes
= c
->mfc_un
.res
.bytes
;
2298 mfcs
.mfcs_wrong_if
= c
->mfc_un
.res
.wrong_if
;
2299 if (nla_put_64bit(skb
, RTA_MFC_STATS
, sizeof(mfcs
), &mfcs
, RTA_PAD
) ||
2300 nla_put_u64_64bit(skb
, RTA_EXPIRES
, jiffies_to_clock_t(lastuse
),
2304 rtm
->rtm_type
= RTN_MULTICAST
;
2308 int ip6mr_get_route(struct net
*net
, struct sk_buff
*skb
, struct rtmsg
*rtm
,
2312 struct mr6_table
*mrt
;
2313 struct mfc6_cache
*cache
;
2314 struct rt6_info
*rt
= (struct rt6_info
*)skb_dst(skb
);
2316 mrt
= ip6mr_get_table(net
, RT6_TABLE_DFLT
);
2320 read_lock(&mrt_lock
);
2321 cache
= ip6mr_cache_find(mrt
, &rt
->rt6i_src
.addr
, &rt
->rt6i_dst
.addr
);
2322 if (!cache
&& skb
->dev
) {
2323 int vif
= ip6mr_find_vif(mrt
, skb
->dev
);
2326 cache
= ip6mr_cache_find_any(mrt
, &rt
->rt6i_dst
.addr
,
2331 struct sk_buff
*skb2
;
2332 struct ipv6hdr
*iph
;
2333 struct net_device
*dev
;
2337 if (!dev
|| (vif
= ip6mr_find_vif(mrt
, dev
)) < 0) {
2338 read_unlock(&mrt_lock
);
2342 /* really correct? */
2343 skb2
= alloc_skb(sizeof(struct ipv6hdr
), GFP_ATOMIC
);
2345 read_unlock(&mrt_lock
);
2349 NETLINK_CB(skb2
).portid
= portid
;
2350 skb_reset_transport_header(skb2
);
2352 skb_put(skb2
, sizeof(struct ipv6hdr
));
2353 skb_reset_network_header(skb2
);
2355 iph
= ipv6_hdr(skb2
);
2358 iph
->flow_lbl
[0] = 0;
2359 iph
->flow_lbl
[1] = 0;
2360 iph
->flow_lbl
[2] = 0;
2361 iph
->payload_len
= 0;
2362 iph
->nexthdr
= IPPROTO_NONE
;
2364 iph
->saddr
= rt
->rt6i_src
.addr
;
2365 iph
->daddr
= rt
->rt6i_dst
.addr
;
2367 err
= ip6mr_cache_unresolved(mrt
, vif
, skb2
);
2368 read_unlock(&mrt_lock
);
2373 if (rtm
->rtm_flags
& RTM_F_NOTIFY
)
2374 cache
->mfc_flags
|= MFC_NOTIFY
;
2376 err
= __ip6mr_fill_mroute(mrt
, skb
, cache
, rtm
);
2377 read_unlock(&mrt_lock
);
2381 static int ip6mr_fill_mroute(struct mr6_table
*mrt
, struct sk_buff
*skb
,
2382 u32 portid
, u32 seq
, struct mfc6_cache
*c
, int cmd
,
2385 struct nlmsghdr
*nlh
;
2389 nlh
= nlmsg_put(skb
, portid
, seq
, cmd
, sizeof(*rtm
), flags
);
2393 rtm
= nlmsg_data(nlh
);
2394 rtm
->rtm_family
= RTNL_FAMILY_IP6MR
;
2395 rtm
->rtm_dst_len
= 128;
2396 rtm
->rtm_src_len
= 128;
2398 rtm
->rtm_table
= mrt
->id
;
2399 if (nla_put_u32(skb
, RTA_TABLE
, mrt
->id
))
2400 goto nla_put_failure
;
2401 rtm
->rtm_type
= RTN_MULTICAST
;
2402 rtm
->rtm_scope
= RT_SCOPE_UNIVERSE
;
2403 if (c
->mfc_flags
& MFC_STATIC
)
2404 rtm
->rtm_protocol
= RTPROT_STATIC
;
2406 rtm
->rtm_protocol
= RTPROT_MROUTED
;
2409 if (nla_put_in6_addr(skb
, RTA_SRC
, &c
->mf6c_origin
) ||
2410 nla_put_in6_addr(skb
, RTA_DST
, &c
->mf6c_mcastgrp
))
2411 goto nla_put_failure
;
2412 err
= __ip6mr_fill_mroute(mrt
, skb
, c
, rtm
);
2413 /* do not break the dump if cache is unresolved */
2414 if (err
< 0 && err
!= -ENOENT
)
2415 goto nla_put_failure
;
2417 nlmsg_end(skb
, nlh
);
2421 nlmsg_cancel(skb
, nlh
);
2425 static int mr6_msgsize(bool unresolved
, int maxvif
)
2428 NLMSG_ALIGN(sizeof(struct rtmsg
))
2429 + nla_total_size(4) /* RTA_TABLE */
2430 + nla_total_size(sizeof(struct in6_addr
)) /* RTA_SRC */
2431 + nla_total_size(sizeof(struct in6_addr
)) /* RTA_DST */
2436 + nla_total_size(4) /* RTA_IIF */
2437 + nla_total_size(0) /* RTA_MULTIPATH */
2438 + maxvif
* NLA_ALIGN(sizeof(struct rtnexthop
))
2440 + nla_total_size_64bit(sizeof(struct rta_mfc_stats
))
2446 static void mr6_netlink_event(struct mr6_table
*mrt
, struct mfc6_cache
*mfc
,
2449 struct net
*net
= read_pnet(&mrt
->net
);
2450 struct sk_buff
*skb
;
2453 skb
= nlmsg_new(mr6_msgsize(mfc
->mf6c_parent
>= MAXMIFS
, mrt
->maxvif
),
2458 err
= ip6mr_fill_mroute(mrt
, skb
, 0, 0, mfc
, cmd
, 0);
2462 rtnl_notify(skb
, net
, 0, RTNLGRP_IPV6_MROUTE
, NULL
, GFP_ATOMIC
);
2468 rtnl_set_sk_err(net
, RTNLGRP_IPV6_MROUTE
, err
);
2471 static size_t mrt6msg_netlink_msgsize(size_t payloadlen
)
2474 NLMSG_ALIGN(sizeof(struct rtgenmsg
))
2475 + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
2476 + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
2477 /* IP6MRA_CREPORT_SRC_ADDR */
2478 + nla_total_size(sizeof(struct in6_addr
))
2479 /* IP6MRA_CREPORT_DST_ADDR */
2480 + nla_total_size(sizeof(struct in6_addr
))
2481 /* IP6MRA_CREPORT_PKT */
2482 + nla_total_size(payloadlen
)
2488 static void mrt6msg_netlink_event(struct mr6_table
*mrt
, struct sk_buff
*pkt
)
2490 struct net
*net
= read_pnet(&mrt
->net
);
2491 struct nlmsghdr
*nlh
;
2492 struct rtgenmsg
*rtgenm
;
2493 struct mrt6msg
*msg
;
2494 struct sk_buff
*skb
;
2498 payloadlen
= pkt
->len
- sizeof(struct mrt6msg
);
2499 msg
= (struct mrt6msg
*)skb_transport_header(pkt
);
2501 skb
= nlmsg_new(mrt6msg_netlink_msgsize(payloadlen
), GFP_ATOMIC
);
2505 nlh
= nlmsg_put(skb
, 0, 0, RTM_NEWCACHEREPORT
,
2506 sizeof(struct rtgenmsg
), 0);
2509 rtgenm
= nlmsg_data(nlh
);
2510 rtgenm
->rtgen_family
= RTNL_FAMILY_IP6MR
;
2511 if (nla_put_u8(skb
, IP6MRA_CREPORT_MSGTYPE
, msg
->im6_msgtype
) ||
2512 nla_put_u32(skb
, IP6MRA_CREPORT_MIF_ID
, msg
->im6_mif
) ||
2513 nla_put_in6_addr(skb
, IP6MRA_CREPORT_SRC_ADDR
,
2515 nla_put_in6_addr(skb
, IP6MRA_CREPORT_DST_ADDR
,
2517 goto nla_put_failure
;
2519 nla
= nla_reserve(skb
, IP6MRA_CREPORT_PKT
, payloadlen
);
2520 if (!nla
|| skb_copy_bits(pkt
, sizeof(struct mrt6msg
),
2521 nla_data(nla
), payloadlen
))
2522 goto nla_put_failure
;
2524 nlmsg_end(skb
, nlh
);
2526 rtnl_notify(skb
, net
, 0, RTNLGRP_IPV6_MROUTE_R
, NULL
, GFP_ATOMIC
);
2530 nlmsg_cancel(skb
, nlh
);
2533 rtnl_set_sk_err(net
, RTNLGRP_IPV6_MROUTE_R
, -ENOBUFS
);
2536 static int ip6mr_rtm_dumproute(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2538 struct net
*net
= sock_net(skb
->sk
);
2539 struct mr6_table
*mrt
;
2540 struct mfc6_cache
*mfc
;
2541 unsigned int t
= 0, s_t
;
2542 unsigned int h
= 0, s_h
;
2543 unsigned int e
= 0, s_e
;
2549 read_lock(&mrt_lock
);
2550 ip6mr_for_each_table(mrt
, net
) {
2555 for (h
= s_h
; h
< MFC6_LINES
; h
++) {
2556 list_for_each_entry(mfc
, &mrt
->mfc6_cache_array
[h
], list
) {
2559 if (ip6mr_fill_mroute(mrt
, skb
,
2560 NETLINK_CB(cb
->skb
).portid
,
2570 spin_lock_bh(&mfc_unres_lock
);
2571 list_for_each_entry(mfc
, &mrt
->mfc6_unres_queue
, list
) {
2574 if (ip6mr_fill_mroute(mrt
, skb
,
2575 NETLINK_CB(cb
->skb
).portid
,
2579 spin_unlock_bh(&mfc_unres_lock
);
2585 spin_unlock_bh(&mfc_unres_lock
);
2592 read_unlock(&mrt_lock
);