/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *		6WIND, Paris, France
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <net/ip6_checksum.h>

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

#define MIF_EXISTS(_net, _idx) ((_net)->ipv6.vif6_table[_idx].dev != NULL)

static struct mfc6_cache *mfc_unres_queue;	/* Queue of unresolved entries */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We revert to Alan's original scheme: the hash table of resolved
   entries is changed only in process context and protected by the
   weak rwlock mrt_lock, while the queue of unresolved entries is
   protected by the strong spinlock mfc_unres_lock.

   This keeps the data path entirely free of exclusive locks.
 */

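/*
 * Illustrative only: the pattern the rules above translate to.
 *
 *	read_lock(&mrt_lock);			(data path, softirq ok)
 *	c = ...lookup in mfc6_cache_array...;
 *	read_unlock(&mrt_lock);
 *
 *	write_lock_bh(&mrt_lock);		(process-context updates)
 *	...link/unlink vifs or resolved cache entries...
 *	write_unlock_bh(&mrt_lock);
 */
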
static struct kmem_cache *mrt_cachep __read_mostly;

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
static void mroute_clean_tables(struct net *net);

#ifdef CONFIG_IPV6_PIMSM_V2
static struct inet6_protocol pim6_protocol;
#endif

static struct timer_list ipmr_expire_timer;


#ifdef CONFIG_PROC_FS

struct ipmr_mfc_iter {
	struct seq_net_private p;
	struct mfc6_cache **cache;
	int ct;
};


static struct mfc6_cache *ipmr_mfc_seq_idx(struct net *net,
					   struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc6_cache *mfc;

	it->cache = net->ipv6.mfc6_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC6_LINES; it->ct++)
		for (mfc = net->ipv6.mfc6_cache_array[it->ct];
		     mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (net_eq(mfc6_net(mfc), net) &&
		    pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}


/*
 *	The /proc interfaces to multicast routing /proc/ip6_mr_cache /proc/ip6_mr_vif
 */

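/*
 * Sample output (illustrative values only, not from a real system):
 *
 *	$ cat /proc/net/ip6_mr_vif
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0            1040      10        0       0 00000
 *
 *	$ cat /proc/net/ip6_mr_cache
 *	Group                            Origin                           Iif      Pkts  Bytes     Wrong  Oifs
 *	ff0e::1                          2001:db8::1                      0          10   1040          0  1:1
 */
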
struct ipmr_vif_iter {
	struct seq_net_private p;
	int ct;
};

static struct mif_device *ip6mr_vif_seq_idx(struct net *net,
					    struct ipmr_vif_iter *iter,
					    loff_t pos)
{
	for (iter->ct = 0; iter->ct < net->ipv6.maxvif; ++iter->ct) {
		if (!MIF_EXISTS(net, iter->ct))
			continue;
		if (pos-- == 0)
			return &net->ipv6.vif6_table[iter->ct];
	}
	return NULL;
}

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct net *net = seq_file_net(seq);

	read_lock(&mrt_lock);
	return *pos ? ip6mr_vif_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ip6mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip6mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < net->ipv6.maxvif) {
		if (!MIF_EXISTS(net, iter->ct))
			continue;
		return &net->ipv6.vif6_table[iter->ct];
	}
	return NULL;
}

static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}

static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_net(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct mif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - net->ipv6.vif6_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}

static struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = ip6mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};

static int ip6mr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
			    sizeof(struct ipmr_vif_iter));
}

static struct file_operations ip6mr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip6mr_vif_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);

	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc6_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(net, seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != net->ipv6.mfc6_cache_array);

	while (++it->ct < MFC6_LINES) {
		mfc = net->ipv6.mfc6_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == net->ipv6.mfc6_cache_array)
		read_unlock(&mrt_lock);
}

static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;
	struct net *net = seq_file_net(seq);

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->mf6c_parent);

		if (it->cache != &mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->mfc_un.res.pkt,
				   mfc->mfc_un.res.bytes,
				   mfc->mfc_un.res.wrong_if);
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (MIF_EXISTS(net, n) &&
				    mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
			    sizeof(struct ipmr_mfc_iter));
}

static struct file_operations ip6mr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ipmr_mfc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};
#endif

#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	int reg_vif_num = net->ipv6.mroute_reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = net->ipv6.vif6_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	skb_dst_drop(skb);
	reg_dev->stats.rx_bytes += skb->len;
	reg_dev->stats.rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}

static struct inet6_protocol pim6_protocol = {
	.handler = pim6_rcv,
};

/* Service routines creating virtual interfaces: PIMREG */

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net *net = dev_net(dev);

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(net, skb, net->ipv6.mroute_reg_vif_num,
			   MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit = reg_vif_xmit,
};

static void reg_vif_setup(struct net_device *dev)
{
	dev->type = ARPHRD_PIMREG;
	dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags = IFF_NOARP;
	dev->netdev_ops = &reg_vif_netdev_ops;
	dev->destructor = free_netdev;
}

static struct net_device *ip6mr_reg_vif(struct net *net)
{
	struct net_device *dev;

	dev = alloc_netdev(0, "pim6reg", reg_vif_setup);
	if (dev == NULL)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif

/*
 *	Delete a VIF entry
 */

static int mif6_delete(struct net *net, int vifi)
{
	struct mif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	if (vifi < 0 || vifi >= net->ipv6.maxvif)
		return -EADDRNOTAVAIL;

	v = &net->ipv6.vif6_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == net->ipv6.mroute_reg_vif_num)
		net->ipv6.mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == net->ipv6.maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (MIF_EXISTS(net, tmp))
				break;
		}
		net->ipv6.maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding--;

	if (v->flags & MIFF_REGISTER)
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	release_net(mfc6_net(c));
	kmem_cache_free(mrt_cachep, c);
}

/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mfc6_cache *c)
{
	struct sk_buff *skb;
	struct net *net = mfc6_net(c);

	atomic_dec(&net->ipv6.cache_resolve_queue_len);

	while((skb = skb_dequeue(&c->mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}


/* Single timer process for all the unresolved queue. */

static void ipmr_do_expire_process(unsigned long dummy)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mfc6_cache *c, **cp;

	cp = &mfc_unres_queue;

	while ((c = *cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;
		ip6mr_destroy_unres(c);
	}

	if (mfc_unres_queue != NULL)
		mod_timer(&ipmr_expire_timer, jiffies + expires);
}

static void ipmr_expire_process(unsigned long dummy)
{
	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (mfc_unres_queue != NULL)
		ipmr_do_expire_process(dummy);

	spin_unlock(&mfc_unres_lock);
}

/* Fill oifs list. It is called under write locked mrt_lock. */

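/*
 * Worked example (hypothetical numbers): with maxvif == 4, mifs 1 and
 * 3 present, and ttls == { 255, 1, 255, 3 }, the loop below copies
 * ttls[1] and ttls[3] and leaves minvif == 1 and maxvif == 4; all
 * other slots stay at 255, meaning "never forward out of this mif".
 */
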
static void ip6mr_update_thresholds(struct mfc6_cache *cache, unsigned char *ttls)
{
	int vifi;
	struct net *net = mfc6_net(cache);

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < net->ipv6.maxvif; vifi++) {
		if (MIF_EXISTS(net, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}

static int mif6_add(struct net *net, struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct mif_device *v = &net->ipv6.vif6_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (MIF_EXISTS(net, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (net->ipv6.mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev)
		in6_dev->cnf.mc_forwarding++;

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit = vifc->vifc_rate_limit;
	v->flags = vifc->mif6c_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold = vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags & MIFF_REGISTER)
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		net->ipv6.mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > net->ipv6.maxvif)
		net->ipv6.maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	return 0;
}

static struct mfc6_cache *ip6mr_cache_find(struct net *net,
					   struct in6_addr *origin,
					   struct in6_addr *mcastgrp)
{
	int line = MFC6_HASH(mcastgrp, origin);
	struct mfc6_cache *c;

	for (c = net->ipv6.mfc6_cache_array[line]; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, origin) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, mcastgrp))
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc6_cache *ip6mr_cache_alloc(struct net *net)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c == NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXMIFS;
	mfc6_net_set(c, net);
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(struct net *net)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c == NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10 * HZ;
	mfc6_net_set(c, net);
	return c;
}

/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while((skb = __skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));

			if (ip6mr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = rtnl_unicast(skb, mfc6_net(uc), NETLINK_CB(skb).pid);
		} else
			ip6_mr_forward(skb, c);
	}
}

/*
 *	Bounce a cache query up to pim6sd. We could use netlink for this but pim6sd
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

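/*
 * For orientation, the daemon side (a hedged sketch, not part of this
 * file): pim6sd reads these reports from the same raw ICMPv6 socket it
 * passed to MRT6_INIT and dispatches on the message type, roughly
 *
 *	struct mrt6msg *m = (struct mrt6msg *)buf;
 *
 *	switch (m->im6_msgtype) {
 *	case MRT6MSG_NOCACHE:	resolve (im6_src, im6_dst), then
 *				install a route with MRT6_ADD_MFC;
 *	case MRT6MSG_WRONGMIF:	consider a PIM assert on im6_mif;
 *	case MRT6MSG_WHOLEPKT:	a full packet from the register vif.
 *	}
 */
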
static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt, mifi_t mifi,
			      int assert)
{
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = net->ipv6.mroute_reg_vif_num;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
		/*
		 *	Copy the IP header
		 */

		skb_put(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

		/*
		 *	Add our header
		 */
		skb_put(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);

		msg->im6_mbz = 0;
		msg->im6_msgtype = assert;
		msg->im6_mif = mifi;
		msg->im6_pad = 0;
		ipv6_addr_copy(&msg->im6_src, &ipv6_hdr(pkt)->saddr);
		ipv6_addr_copy(&msg->im6_dst, &ipv6_hdr(pkt)->daddr);

		skb_dst_set(skb, dst_clone(skb_dst(pkt)));
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (net->ipv6.mroute6_sk == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to user space multicast routing algorithms
	 */
	ret = sock_queue_rcv_skb(net->ipv6.mroute6_sk, skb);
	if (ret < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute6: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}

/*
 *	Queue a packet for resolution. Looks up the unresolved-cache
 *	entry under mfc_unres_lock, creating it if none exists yet.
 */

static int
ip6mr_cache_unresolved(struct net *net, mifi_t mifi, struct sk_buff *skb)
{
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c = mfc_unres_queue; c; c = c->next) {
		if (net_eq(mfc6_net(c), net) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&net->ipv6.cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres(net)) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(net, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&net->ipv6.cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		ipmr_do_expire_process(1);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}

/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct net *net, struct mf6cctl *mfc)
{
	int line;
	struct mfc6_cache *c, **cp;

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &net->ipv6.mfc6_cache_array[line];
	     (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr)) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct net *net = dev_net(dev);
	struct mif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	v = &net->ipv6.vif6_table[0];
	for (ct = 0; ct < net->ipv6.maxvif; ct++, v++) {
		if (v->dev == dev)
			mif6_delete(net, ct);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

/*
 *	Setup for IP multicast routing
 */

static int __net_init ip6mr_net_init(struct net *net)
{
	int err = 0;
	net->ipv6.vif6_table = kcalloc(MAXMIFS, sizeof(struct mif_device),
				       GFP_KERNEL);
	if (!net->ipv6.vif6_table) {
		err = -ENOMEM;
		goto fail;
	}

	/* Forwarding cache */
	net->ipv6.mfc6_cache_array = kcalloc(MFC6_LINES,
					     sizeof(struct mfc6_cache *),
					     GFP_KERNEL);
	if (!net->ipv6.mfc6_cache_array) {
		err = -ENOMEM;
		goto fail_mfc6_cache;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	net->ipv6.mroute_reg_vif_num = -1;
#endif

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_fops_create(net, "ip6_mr_vif", 0, &ip6mr_vif_fops))
		goto proc_vif_fail;
	if (!proc_net_fops_create(net, "ip6_mr_cache", 0, &ip6mr_mfc_fops))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	proc_net_remove(net, "ip6_mr_vif");
proc_vif_fail:
	kfree(net->ipv6.mfc6_cache_array);
#endif
fail_mfc6_cache:
	kfree(net->ipv6.vif6_table);
fail:
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_net_remove(net, "ip6_mr_cache");
	proc_net_remove(net, "ip6_mr_vif");
#endif
	mroute_clean_tables(net);
	kfree(net->ipv6.mfc6_cache_array);
	kfree(net->ipv6.vif6_table);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};

int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
	return 0;
reg_notif_fail:
	del_timer(&ipmr_expire_timer);
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}

void ip6_mr_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_mr_notifier);
	del_timer(&ipmr_expire_timer);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}

static int ip6mr_mfc_add(struct net *net, struct mf6cctl *mfc, int mrtsock)
{
	int line;
	struct mfc6_cache *uc, *c, **cp;
	unsigned char ttls[MAXMIFS];
	int i;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	line = MFC6_HASH(&mfc->mf6cc_mcastgrp.sin6_addr, &mfc->mf6cc_origin.sin6_addr);

	for (cp = &net->ipv6.mfc6_cache_array[line];
	     (c = *cp) != NULL; cp = &c->next) {
		if (ipv6_addr_equal(&c->mf6c_origin, &mfc->mf6cc_origin.sin6_addr) &&
		    ipv6_addr_equal(&c->mf6c_mcastgrp, &mfc->mf6cc_mcastgrp.sin6_addr))
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mf6c_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(c, ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc(net);
	if (c == NULL)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->mf6c_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(c, ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = net->ipv6.mfc6_cache_array[line];
	net->ipv6.mfc6_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc = *cp) != NULL;
	     cp = &uc->next) {
		if (net_eq(mfc6_net(uc), net) &&
		    ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			*cp = uc->next;
			atomic_dec(&net->ipv6.cache_resolve_queue_len);
			break;
		}
	}
	if (mfc_unres_queue == NULL)
		del_timer(&ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ip6mr_cache_resolve(uc, c);
		ip6mr_cache_free(uc);
	}
	return 0;
}

/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct net *net)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i = 0; i < net->ipv6.maxvif; i++) {
		if (!(net->ipv6.vif6_table[i].flags & VIFF_STATIC))
			mif6_delete(net, i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i = 0; i < MFC6_LINES; i++) {
		struct mfc6_cache *c, **cp;

		cp = &net->ipv6.mfc6_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags & MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			ip6mr_cache_free(c);
		}
	}

	if (atomic_read(&net->ipv6.cache_resolve_queue_len) != 0) {
		struct mfc6_cache *c, **cp;

		spin_lock_bh(&mfc_unres_lock);
		cp = &mfc_unres_queue;
		while ((c = *cp) != NULL) {
			if (!net_eq(mfc6_net(c), net)) {
				cp = &c->next;
				continue;
			}
			*cp = c->next;
			ip6mr_destroy_unres(c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}

static int ip6mr_sk_init(struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (likely(net->ipv6.mroute6_sk == NULL)) {
		net->ipv6.mroute6_sk = sk;
		net->ipv6.devconf_all->mc_forwarding++;
	}
	else
		err = -EADDRINUSE;
	write_unlock_bh(&mrt_lock);

	rtnl_unlock();

	return err;
}

int ip6mr_sk_done(struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	if (sk == net->ipv6.mroute6_sk) {
		write_lock_bh(&mrt_lock);
		net->ipv6.mroute6_sk = NULL;
		net->ipv6.devconf_all->mc_forwarding--;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(net);
	} else
		err = -EACCES;
	rtnl_unlock();

	return err;
}

/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

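/*
 * Userspace view (a hedged, minimal sketch, not part of this file;
 * the interface name, mif index and addresses are made up): a routing
 * daemon typically drives the options handled below like
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	struct mif6ctl mc = { .mif6c_mifi = 1,
 *			      .mif6c_pifi = if_nametoindex("eth0") };
 *	struct mf6cctl fc = { .mf6cc_parent = 1 };
 *
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &fc.mf6cc_origin.sin6_addr);
 *	inet_pton(AF_INET6, "ff0e::1", &fc.mf6cc_mcastgrp.sin6_addr);
 *	IF_SET(2, &fc.mf6cc_ifset);	(forward out of mif #2)
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MFC, &fc, sizeof(fc));
 *
 * Closing the socket (or MRT6_DONE) tears everything down again via
 * ip6mr_sk_done().
 */
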
int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, int optlen)
{
	int ret;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);

	if (optname != MRT6_INIT) {
		if (sk != net->ipv6.mroute6_sk && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_ICMPV6)
			return -EOPNOTSUPP;
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, &vif, sk == net->ipv6.mroute6_sk);
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(net, mifi);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC)
			ret = ip6mr_mfc_delete(net, &mfc);
		else
			ret = ip6mr_mfc_add(net, &mfc,
					    sk == net->ipv6.mroute6_sk);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		net->ipv6.mroute_do_assert = !!v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != net->ipv6.mroute_do_pim) {
			net->ipv6.mroute_do_pim = v;
			net->ipv6.mroute_do_assert = v;
			if (net->ipv6.mroute_do_pim)
				ret = inet6_add_protocol(&pim6_protocol,
							 IPPROTO_PIM);
			else
				ret = inet6_del_protocol(&pim6_protocol,
							 IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
		}
		rtnl_unlock();
		return ret;
	}

#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}

/*
 *	Getsockopt support for the multicast routing system.
 */

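/*
 * For example (illustrative), querying the API version from userspace:
 *
 *	int ver;
 *	socklen_t len = sizeof(ver);
 *	getsockopt(s, IPPROTO_IPV6, MRT6_VERSION, &ver, &len);
 *
 * after which ver holds 0x0305.
 */
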
int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = net->ipv6.mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = net->ipv6.mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}

/*
 *	The IP multicast ioctl support routines.
 */

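/*
 * Illustrative userspace call (addresses made up), reading the
 * per-(S,G) counters maintained by the forwarding path:
 *
 *	struct sioc_sg_req6 sr;
 *
 *	memset(&sr, 0, sizeof(sr));
 *	inet_pton(AF_INET6, "2001:db8::1", &sr.src.sin6_addr);
 *	inet_pton(AF_INET6, "ff0e::1", &sr.grp.sin6_addr);
 *	if (ioctl(s, SIOCGETSGCNT_IN6, &sr) == 0)
 *		printf("pkts %lu bytes %lu\n", sr.pktcnt, sr.bytecnt);
 */
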
int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct mif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= net->ipv6.maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &net->ipv6.vif6_table[vr.mifi];
		if (MIF_EXISTS(net, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ip6mr_cache_find(net, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}


static inline int ip6mr_forward2_finish(struct sk_buff *skb)
{
	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	return dst_output(skb);
}

/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct net *net = mfc6_net(c);
	struct mif_device *vif = &net->ipv6.vif6_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi fl;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(net, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl = (struct flowi) {
		.oif = vif->link,
		.nl_u = { .ip6_u =
			  { .daddr = ipv6h->daddr, }
		}
	};

	dst = ip6_route_output(net, NULL, &fl);
	if (!dst)
		goto out_free;

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicast
	 * program, that program should receive packets regardless of which
	 * interface it joined on; otherwise it would have to join on every
	 * interface. A multihomed host (or a router that is not an
	 * mrouter), on the other hand, cannot join on more than one
	 * interface, as that would result in receiving duplicate packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(PF_INET6, NF_INET_FORWARD, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}

static int ip6mr_find_vif(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	int ct;
	for (ct = net->ipv6.maxvif - 1; ct >= 0; ct--) {
		if (net->ipv6.vif6_table[ct].dev == dev)
			break;
	}
	return ct;
}

static int ip6_mr_forward(struct sk_buff *skb, struct mfc6_cache *cache)
{
	int psend = -1;
	int vif, ct;
	struct net *net = mfc6_net(cache);

	vif = cache->mf6c_parent;
	cache->mfc_un.res.pkt++;
	cache->mfc_un.res.bytes += skb->len;

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (net->ipv6.vif6_table[vif].dev != skb->dev) {
		int true_vifi;

		cache->mfc_un.res.wrong_if++;
		true_vifi = ip6mr_find_vif(skb->dev);

		if (true_vifi >= 0 && net->ipv6.mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (net->ipv6.mroute_do_pim ||
		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
			cache->mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(net, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

	net->ipv6.vif6_table[vif].pkt_in++;
	net->ipv6.vif6_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	for (ct = cache->mfc_un.res.maxvif - 1; ct >= cache->mfc_un.res.minvif; ct--) {
		if (ipv6_hdr(skb)->hop_limit > cache->mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(skb2, cache, psend);
			}
			psend = ct;
		}
	}
	if (psend != -1) {
		ip6mr_forward2(skb, cache, psend);
		return 0;
	}

dont_forward:
	kfree_skb(skb);
	return 0;
}


/*
 *	Multicast packets for forwarding arrive here
 */

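/*
 * (Entered from ip6_mc_input() once an MRT6_INIT socket has enabled
 * mc_forwarding for the namespace.)
 */
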
int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(net,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache == NULL) {
		int vif;

		vif = ip6mr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(net, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}


static int
ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net *net = mfc6_net(c);
	struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = net->ipv6.vif6_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}

int ip6mr_get_route(struct net *net,
		    struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(net, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ip6mr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		ipv6_addr_copy(&iph->saddr, &rt->rt6i_src.addr);
		ipv6_addr_copy(&iph->daddr, &rt->rt6i_dst.addr);

		err = ip6mr_cache_unresolved(net, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;

	err = ip6mr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}