/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		IPv4 Forwarding Information Base: FIB frontend.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>

#ifndef CONFIG_IP_MULTIPLE_TABLES

static int __net_init fib4_rules_init(struct net *net)
{
	struct fib_table *local_table, *main_table;

	local_table = fib_trie_table(RT_TABLE_LOCAL);
	if (local_table == NULL)
		return -ENOMEM;

	main_table = fib_trie_table(RT_TABLE_MAIN);
	if (main_table == NULL)
		goto fail;

	hlist_add_head_rcu(&local_table->tb_hlist,
			   &net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
	hlist_add_head_rcu(&main_table->tb_hlist,
			   &net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
	return 0;

fail:
	kfree(local_table);
	return -ENOMEM;
}
#else

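/*
 * Look up the FIB table with the given id, creating it on demand.
 * An id of 0 is treated as RT_TABLE_MAIN.  Returns NULL if a new
 * table is needed but cannot be allocated.
 */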
struct fib_table *fib_new_table(struct net *net, u32 id)
{
	struct fib_table *tb;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	tb = fib_get_table(net, id);
	if (tb)
		return tb;

	tb = fib_trie_table(id);
	if (!tb)
		return NULL;
	h = id & (FIB_TABLE_HASHSZ - 1);
	hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
	return tb;
}

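/*
 * Look up an existing FIB table by id without creating it; as above,
 * an id of 0 maps to RT_TABLE_MAIN.  A typical caller pattern,
 * mirroring __inet_dev_addr_type() below (illustrative sketch only):
 *
 *	local_table = fib_get_table(net, RT_TABLE_LOCAL);
 *	if (local_table) {
 *		rcu_read_lock();
 *		if (!fib_table_lookup(local_table, &fl4, &res,
 *				      FIB_LOOKUP_NOREF))
 *			... use res ...
 *		rcu_read_unlock();
 *	}
 */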
struct fib_table *fib_get_table(struct net *net, u32 id)
{
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	unsigned int h;

	if (id == 0)
		id = RT_TABLE_MAIN;
	h = id & (FIB_TABLE_HASHSZ - 1);

	rcu_read_lock();
	head = &net->ipv4.fib_table_hash[h];
	hlist_for_each_entry_rcu(tb, node, head, tb_hlist) {
		if (tb->tb_id == id) {
			rcu_read_unlock();
			return tb;
		}
	}
	rcu_read_unlock();
	return NULL;
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */

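/*
 * Walk every FIB table in this namespace, flush the entries that
 * fib_table_flush() considers stale, and invalidate the routing cache
 * if anything was removed.
 */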
static void fib_flush(struct net *net)
{
	int flushed = 0;
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	unsigned int h;

	for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
		head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry(tb, node, head, tb_hlist)
			flushed += fib_table_flush(tb);
	}

	if (flushed)
		rt_cache_flush(net, -1);
}

/*
 * Find the address type as if only "dev" were present in the system.
 * If dev is NULL then all interfaces are taken into consideration.
 */
static inline unsigned int __inet_dev_addr_type(struct net *net,
						const struct net_device *dev,
						__be32 addr)
{
	struct flowi4 fl4 = { .daddr = addr };
	struct fib_result res;
	unsigned int ret = RTN_BROADCAST;
	struct fib_table *local_table;

	if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
		return RTN_BROADCAST;
	if (ipv4_is_multicast(addr))
		return RTN_MULTICAST;

#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif

	local_table = fib_get_table(net, RT_TABLE_LOCAL);
	if (local_table) {
		ret = RTN_UNICAST;
		rcu_read_lock();
		if (!fib_table_lookup(local_table, &fl4, &res, FIB_LOOKUP_NOREF)) {
			if (!dev || dev == res.fi->fib_dev)
				ret = res.type;
		}
		rcu_read_unlock();
	}
	return ret;
}

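/*
 * inet_addr_type() and inet_dev_addr_type() below classify an address
 * against the local table.  Illustrative use (a sketch, not taken from
 * this file): a caller rejecting non-local bind addresses might do
 *
 *	if (inet_addr_type(net, addr) != RTN_LOCAL)
 *		return -EADDRNOTAVAIL;
 */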
unsigned int inet_addr_type(struct net *net, __be32 addr)
{
	return __inet_dev_addr_type(net, NULL, addr);
}
EXPORT_SYMBOL(inet_addr_type);

unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
				__be32 addr)
{
	return __inet_dev_addr_type(net, dev, addr);
}
EXPORT_SYMBOL(inet_dev_addr_type);

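/*
 * Compute the "specific destination" address for a received packet.
 * For ordinary unicast this is simply the packet's destination; for
 * broadcast/multicast we look up the source to find a preferred source
 * address of suitable scope, falling back to inet_select_addr().
 */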
__be32 fib_compute_spec_dst(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct rtable *rt;
	struct flowi4 fl4;
	struct net *net;
	int scope;

	rt = skb_rtable(skb);
	if (!(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)))
		return ip_hdr(skb)->daddr;

	in_dev = __in_dev_get_rcu(dev);
	BUG_ON(!in_dev);

	net = dev_net(dev);

	scope = RT_SCOPE_UNIVERSE;
	if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
		fl4.flowi4_oif = 0;
		fl4.flowi4_iif = net->loopback_dev->ifindex;
		fl4.daddr = ip_hdr(skb)->saddr;
		fl4.saddr = 0;
		fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
		fl4.flowi4_scope = scope;
		fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
		if (!fib_lookup(net, &fl4, &res))
			return FIB_RES_PREFSRC(net, res);
	} else {
		scope = RT_SCOPE_LINK;
	}

	return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
int fib_num_tclassid_users __read_mostly;
#endif

/* Given (packet source, input interface) and optional (dst, oif, tos):
 * - (main) check that the source is valid, i.e. not broadcast and not
 *   one of our local addresses.
 * - figure out what "logical" interface this packet arrived on
 *   and calculate the "specific destination" address.
 * - check that the packet arrived from the expected physical interface.
 * Called with rcu_read_lock() held.
 */
static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
				 u8 tos, int oif, struct net_device *dev,
				 int rpf, struct in_device *idev, u32 *itag)
{
	int ret, no_addr, accept_local;
	struct fib_result res;
	struct flowi4 fl4;
	struct net *net;
	bool dev_match;

	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = oif;
	fl4.daddr = src;
	fl4.saddr = dst;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;

	no_addr = accept_local = 0;
	no_addr = idev->ifa_list == NULL;

	accept_local = IN_DEV_ACCEPT_LOCAL(idev);
	fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;

	net = dev_net(dev);
	if (fib_lookup(net, &fl4, &res))
		goto last_resort;
	if (res.type != RTN_UNICAST) {
		if (res.type != RTN_LOCAL || !accept_local)
			goto e_inval;
	}
	fib_combine_itag(itag, &res);
	dev_match = false;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	for (ret = 0; ret < res.fi->fib_nhs; ret++) {
		struct fib_nh *nh = &res.fi->fib_nh[ret];

		if (nh->nh_dev == dev) {
			dev_match = true;
			break;
		}
	}
#else
	if (FIB_RES_DEV(res) == dev)
		dev_match = true;
#endif
	if (dev_match) {
		ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
		return ret;
	}
	if (no_addr)
		goto last_resort;
	if (rpf == 1)
		goto e_rpf;
	fl4.flowi4_oif = dev->ifindex;

	ret = 0;
	if (fib_lookup(net, &fl4, &res) == 0) {
		if (res.type == RTN_UNICAST)
			ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
	}
	return ret;

last_resort:
	if (rpf)
		goto e_rpf;
	*itag = 0;
	return 0;

e_inval:
	return -EINVAL;
e_rpf:
	return -EXDEV;
}

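/*
 * fib_validate_source() returns a negative errno when the source fails
 * validation (-EINVAL for a martian source, -EXDEV when reverse path
 * filtering rejects the packet); otherwise it returns 0, or 1 when the
 * matching nexthop has host (or narrower) scope.
 */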
/* Ignore rp_filter for packets protected by IPsec. */
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
			u8 tos, int oif, struct net_device *dev,
			struct in_device *idev, u32 *itag)
{
	int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);

	if (!r && !fib_num_tclassid_users) {
		*itag = 0;
		return 0;
	}
	return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
}

static inline __be32 sk_extract_addr(struct sockaddr *addr)
{
	return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
}

static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
{
	struct nlattr *nla;

	nla = (struct nlattr *) ((char *) mx + len);
	nla->nla_type = type;
	nla->nla_len = nla_attr_size(4);
	*(u32 *) nla_data(nla) = value;

	return len + nla_total_size(4);
}

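/*
 * Convert the legacy struct rtentry used by the SIOCADDRT/SIOCDELRT
 * ioctls into a struct fib_config.  Note the historical quirks: the
 * route metric is stored off by one (rt_metric - 1), a device name of
 * the form "eth0:1" selects the matching ifa as the preferred source,
 * and RTF_MTU actually sets RTAX_ADVMSS (mtu - 40).
 */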
static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
				 struct fib_config *cfg)
{
	__be32 addr;
	int plen;

	memset(cfg, 0, sizeof(*cfg));
	cfg->fc_nlinfo.nl_net = net;

	if (rt->rt_dst.sa_family != AF_INET)
		return -EAFNOSUPPORT;

	/*
	 * Check mask for validity:
	 * a) it must be contiguous.
	 * b) destination must have all host bits clear.
	 * c) if the application forgot to set the correct family (AF_INET),
	 *    reject the request unless it is absolutely clear, i.e.
	 *    both family and mask are zero.
	 */
	plen = 32;
	addr = sk_extract_addr(&rt->rt_dst);
	if (!(rt->rt_flags & RTF_HOST)) {
		__be32 mask = sk_extract_addr(&rt->rt_genmask);

		if (rt->rt_genmask.sa_family != AF_INET) {
			if (mask || rt->rt_genmask.sa_family)
				return -EAFNOSUPPORT;
		}

		if (bad_mask(mask, addr))
			return -EINVAL;

		plen = inet_mask_len(mask);
	}

	cfg->fc_dst_len = plen;
	cfg->fc_dst = addr;

	if (cmd != SIOCDELRT) {
		cfg->fc_nlflags = NLM_F_CREATE;
		cfg->fc_protocol = RTPROT_BOOT;
	}

	if (rt->rt_metric)
		cfg->fc_priority = rt->rt_metric - 1;

	if (rt->rt_flags & RTF_REJECT) {
		cfg->fc_scope = RT_SCOPE_HOST;
		cfg->fc_type = RTN_UNREACHABLE;
		return 0;
	}

	cfg->fc_scope = RT_SCOPE_NOWHERE;
	cfg->fc_type = RTN_UNICAST;

	if (rt->rt_dev) {
		char *colon;
		struct net_device *dev;
		char devname[IFNAMSIZ];

		if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
			return -EFAULT;

		devname[IFNAMSIZ-1] = 0;
		colon = strchr(devname, ':');
		if (colon)
			*colon = 0;
		dev = __dev_get_by_name(net, devname);
		if (!dev)
			return -ENODEV;
		cfg->fc_oif = dev->ifindex;
		if (colon) {
			struct in_ifaddr *ifa;
			struct in_device *in_dev = __in_dev_get_rtnl(dev);
			if (!in_dev)
				return -ENODEV;
			*colon = ':';
			for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
				if (strcmp(ifa->ifa_label, devname) == 0)
					break;
			if (ifa == NULL)
				return -ENODEV;
			cfg->fc_prefsrc = ifa->ifa_local;
		}
	}

	addr = sk_extract_addr(&rt->rt_gateway);
	if (rt->rt_gateway.sa_family == AF_INET && addr) {
		cfg->fc_gw = addr;
		if (rt->rt_flags & RTF_GATEWAY &&
		    inet_addr_type(net, addr) == RTN_UNICAST)
			cfg->fc_scope = RT_SCOPE_UNIVERSE;
	}

	if (cmd == SIOCDELRT)
		return 0;

	if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
		return -EINVAL;

	if (cfg->fc_scope == RT_SCOPE_NOWHERE)
		cfg->fc_scope = RT_SCOPE_LINK;

	if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
		struct nlattr *mx;
		int len = 0;

		mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
		if (mx == NULL)
			return -ENOMEM;

		if (rt->rt_flags & RTF_MTU)
			len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);

		if (rt->rt_flags & RTF_WINDOW)
			len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);

		if (rt->rt_flags & RTF_IRTT)
			len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);

		cfg->fc_mx = mx;
		cfg->fc_mx_len = len;
	}

	return 0;
}

/*
 * Handle IP routing ioctl calls.
 * These are used to manipulate the routing tables.
 */
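/*
 * Illustrative userspace sketch (not part of this file) of the legacy
 * interface handled below; a CAP_NET_ADMIN process adds a gateway
 * route for 192.0.2.0/24 via 198.51.100.1 over an AF_INET socket fd:
 *
 *	struct rtentry rt;
 *	struct sockaddr_in *sin;
 *
 *	memset(&rt, 0, sizeof(rt));
 *	sin = (struct sockaddr_in *)&rt.rt_dst;
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.0.2.0");
 *	sin = (struct sockaddr_in *)&rt.rt_genmask;
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("255.255.255.0");
 *	sin = (struct sockaddr_in *)&rt.rt_gateway;
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("198.51.100.1");
 *	rt.rt_flags = RTF_UP | RTF_GATEWAY;
 *	ioctl(fd, SIOCADDRT, &rt);
 */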
int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib_config cfg;
	struct rtentry rt;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&rt, arg, sizeof(rt)))
			return -EFAULT;

		rtnl_lock();
		err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
		if (err == 0) {
			struct fib_table *tb;

			if (cmd == SIOCDELRT) {
				tb = fib_get_table(net, cfg.fc_table);
				if (tb)
					err = fib_table_delete(tb, &cfg);
				else
					err = -ESRCH;
			} else {
				tb = fib_new_table(net, cfg.fc_table);
				if (tb)
					err = fib_table_insert(tb, &cfg);
				else
					err = -ENOBUFS;
			}

			/* allocated by rtentry_to_fib_config() */
			kfree(cfg.fc_mx);
		}
		rtnl_unlock();
		return err;
	}
	return -EINVAL;
}

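/*
 * Netlink attribute policy for IPv4 RTM_NEWROUTE/RTM_DELROUTE/RTM_GETROUTE
 * requests; addresses are carried as 32-bit (__be32) attributes and the
 * metrics as a nested block.
 */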
const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
	[RTA_DST]		= { .type = NLA_U32 },
	[RTA_SRC]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_GATEWAY]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_PREFSRC]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_FLOW]		= { .type = NLA_U32 },
};

static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
			     struct nlmsghdr *nlh, struct fib_config *cfg)
{
	struct nlattr *attr;
	int err, remaining;
	struct rtmsg *rtm;

	err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	memset(cfg, 0, sizeof(*cfg));

	rtm = nlmsg_data(nlh);
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_tos = rtm->rtm_tos;
	cfg->fc_table = rtm->rtm_table;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_scope = rtm->rtm_scope;
	cfg->fc_type = rtm->rtm_type;
	cfg->fc_flags = rtm->rtm_flags;
	cfg->fc_nlflags = nlh->nlmsg_flags;

	cfg->fc_nlinfo.pid = NETLINK_CB(skb).pid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = net;

	if (cfg->fc_type > RTN_MAX) {
		err = -EINVAL;
		goto errout;
	}

	nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
		switch (nla_type(attr)) {
		case RTA_DST:
			cfg->fc_dst = nla_get_be32(attr);
			break;
		case RTA_OIF:
			cfg->fc_oif = nla_get_u32(attr);
			break;
		case RTA_GATEWAY:
			cfg->fc_gw = nla_get_be32(attr);
			break;
		case RTA_PRIORITY:
			cfg->fc_priority = nla_get_u32(attr);
			break;
		case RTA_PREFSRC:
			cfg->fc_prefsrc = nla_get_be32(attr);
			break;
		case RTA_METRICS:
			cfg->fc_mx = nla_data(attr);
			cfg->fc_mx_len = nla_len(attr);
			break;
		case RTA_MULTIPATH:
			cfg->fc_mp = nla_data(attr);
			cfg->fc_mp_len = nla_len(attr);
			break;
		case RTA_FLOW:
			cfg->fc_flow = nla_get_u32(attr);
			break;
		case RTA_TABLE:
			cfg->fc_table = nla_get_u32(attr);
			break;
		}
	}

	return 0;
errout:
	return err;
}

static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_config cfg;
	struct fib_table *tb;
	int err;

	err = rtm_to_fib_config(net, skb, nlh, &cfg);
	if (err < 0)
		goto errout;

	tb = fib_get_table(net, cfg.fc_table);
	if (tb == NULL) {
		err = -ESRCH;
		goto errout;
	}

	err = fib_table_delete(tb, &cfg);
errout:
	return err;
}

static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct fib_config cfg;
	struct fib_table *tb;
	int err;

	err = rtm_to_fib_config(net, skb, nlh, &cfg);
	if (err < 0)
		goto errout;

	tb = fib_new_table(net, cfg.fc_table);
	if (tb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	err = fib_table_insert(tb, &cfg);
errout:
	return err;
}

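/*
 * Dump all FIB tables to userspace in response to RTM_GETROUTE with
 * NLM_F_DUMP.  cb->args[0] and cb->args[1] record the hash bucket and
 * table index reached so far, so an interrupted dump can resume where
 * it left off.
 */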
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct fib_table *tb;
	struct hlist_node *node;
	struct hlist_head *head;
	int dumped = 0;

	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
	    ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
		return ip_rt_dump(skb, cb);

	s_h = cb->args[0];
	s_e = cb->args[1];

	for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
		e = 0;
		head = &net->ipv4.fib_table_hash[h];
		hlist_for_each_entry(tb, node, head, tb_hlist) {
			if (e < s_e)
				goto next;
			if (dumped)
				memset(&cb->args[2], 0, sizeof(cb->args) -
						 2 * sizeof(cb->args[0]));
			if (fib_table_dump(tb, skb, cb) < 0)
				goto out;
			dumped = 1;
next:
			e++;
		}
	}
out:
	cb->args[1] = e;
	cb->args[0] = h;

	return skb->len;
}

/* Prepare and feed an intra-kernel routing request.
 * Really, it should be a netlink message, but :-( netlink
 * may not be configured, so we feed it directly to the
 * fib engine.  It is legal because all events occur
 * only when netlink is already locked.
 */
static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
{
	struct net *net = dev_net(ifa->ifa_dev->dev);
	struct fib_table *tb;
	struct fib_config cfg = {
		.fc_protocol = RTPROT_KERNEL,
		.fc_type = type,
		.fc_dst = dst,
		.fc_dst_len = dst_len,
		.fc_prefsrc = ifa->ifa_local,
		.fc_oif = ifa->ifa_dev->dev->ifindex,
		.fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
		.fc_nlinfo = {
			.nl_net = net,
		},
	};

	if (type == RTN_UNICAST)
		tb = fib_new_table(net, RT_TABLE_MAIN);
	else
		tb = fib_new_table(net, RT_TABLE_LOCAL);

	if (tb == NULL)
		return;

	cfg.fc_table = tb->tb_id;

	if (type != RTN_LOCAL)
		cfg.fc_scope = RT_SCOPE_LINK;
	else
		cfg.fc_scope = RT_SCOPE_HOST;

	if (cmd == RTM_NEWROUTE)
		fib_table_insert(tb, &cfg);
	else
		fib_table_delete(tb, &cfg);
}

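/*
 * Install the routes implied by a newly added interface address: a /32
 * local route for the address itself and, if the interface is up, the
 * relevant broadcast routes and (for a primary address) the subnet
 * route.
 */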
void fib_add_ifaddr(struct in_ifaddr *ifa)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *prim = ifa;
	__be32 mask = ifa->ifa_mask;
	__be32 addr = ifa->ifa_local;
	__be32 prefix = ifa->ifa_address & mask;

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, prefix, mask);
		if (prim == NULL) {
			pr_warn("%s: bug: prim == NULL\n", __func__);
			return;
		}
	}

	fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);

	if (!(dev->flags & IFF_UP))
		return;

	/* Add broadcast address, if it is explicitly assigned. */
	if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
		fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);

	if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
	    (prefix != addr || ifa->ifa_prefixlen < 32)) {
		fib_magic(RTM_NEWROUTE,
			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
			  prefix, ifa->ifa_prefixlen, prim);

		/* Add network-specific broadcasts, when it makes sense */
		if (ifa->ifa_prefixlen < 31) {
			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
			fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
				  32, prim);
		}
	}
}

/* Delete a primary or secondary address.
 * Optionally, on secondary address promotion consider the addresses
 * from subnet iprim as deleted, even if they are still in the device
 * list.  In this case the secondary ifa can be in the device list.
 */
void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
{
	struct in_device *in_dev = ifa->ifa_dev;
	struct net_device *dev = in_dev->dev;
	struct in_ifaddr *ifa1;
	struct in_ifaddr *prim = ifa, *prim1 = NULL;
	__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
	__be32 any = ifa->ifa_address & ifa->ifa_mask;
#define LOCAL_OK	1
#define BRD_OK		2
#define BRD0_OK		4
#define BRD1_OK		8
	unsigned int ok = 0;
	int subnet = 0;		/* Primary network */
	int gone = 1;		/* Address is missing */
	int same_prefsrc = 0;	/* Another primary with same IP */

	if (ifa->ifa_flags & IFA_F_SECONDARY) {
		prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
		if (prim == NULL) {
			pr_warn("%s: bug: prim == NULL\n", __func__);
			return;
		}
		if (iprim && iprim != prim) {
			pr_warn("%s: bug: iprim != prim\n", __func__);
			return;
		}
	} else if (!ipv4_is_zeronet(any) &&
		   (any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
		fib_magic(RTM_DELROUTE,
			  dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
			  any, ifa->ifa_prefixlen, prim);
		subnet = 1;
	}

	/* Deletion is more complicated than addition.
	 * We should take care not to delete too much :-)
	 *
	 * Scan the address list to be sure that addresses are really gone.
	 */

	for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
		if (ifa1 == ifa) {
			/* promotion, keep the IP */
			gone = 0;
			continue;
		}
		/* Ignore IFAs from our subnet */
		if (iprim && ifa1->ifa_mask == iprim->ifa_mask &&
		    inet_ifa_match(ifa1->ifa_address, iprim))
			continue;

		/* Ignore ifa1 if it uses different primary IP (prefsrc) */
		if (ifa1->ifa_flags & IFA_F_SECONDARY) {
			/* Another address from our subnet? */
			if (ifa1->ifa_mask == prim->ifa_mask &&
			    inet_ifa_match(ifa1->ifa_address, prim))
				prim1 = prim;
			else {
				/* We reached the secondaries, so
				 * same_prefsrc should be determined.
				 */
				if (!same_prefsrc)
					continue;
				/* Search new prim1 if ifa1 is not
				 * using the current prim1
				 */
				if (!prim1 ||
				    ifa1->ifa_mask != prim1->ifa_mask ||
				    !inet_ifa_match(ifa1->ifa_address, prim1))
					prim1 = inet_ifa_byprefix(in_dev,
							ifa1->ifa_address,
							ifa1->ifa_mask);
				if (!prim1)
					continue;
				if (prim1->ifa_local != prim->ifa_local)
					continue;
			}
		} else {
			if (prim->ifa_local != ifa1->ifa_local)
				continue;
			prim1 = ifa1;
			if (prim != prim1)
				same_prefsrc = 1;
		}
		if (ifa->ifa_local == ifa1->ifa_local)
			ok |= LOCAL_OK;
		if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
			ok |= BRD_OK;
		if (brd == ifa1->ifa_broadcast)
			ok |= BRD1_OK;
		if (any == ifa1->ifa_broadcast)
			ok |= BRD0_OK;
		/* primary has network specific broadcasts */
		if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) {
			__be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask;
			__be32 any1 = ifa1->ifa_address & ifa1->ifa_mask;

			if (!ipv4_is_zeronet(any1)) {
				if (ifa->ifa_broadcast == brd1 ||
				    ifa->ifa_broadcast == any1)
					ok |= BRD_OK;
				if (brd == brd1 || brd == any1)
					ok |= BRD1_OK;
				if (any == brd1 || any == any1)
					ok |= BRD0_OK;
			}
		}
	}

	if (!(ok & BRD_OK))
		fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
	if (subnet && ifa->ifa_prefixlen < 31) {
		if (!(ok & BRD1_OK))
			fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
		if (!(ok & BRD0_OK))
			fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
	}
	if (!(ok & LOCAL_OK)) {
		fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);

		/* Check that this local address has finally disappeared. */
		if (gone &&
		    inet_addr_type(dev_net(dev), ifa->ifa_local) != RTN_LOCAL) {
			/* And last, but not least, we must flush
			 * stray FIB entries.
			 *
			 * First of all, we scan the fib_info list searching
			 * for stray nexthop entries, then ignite fib_flush.
			 */
			if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
				fib_flush(dev_net(dev));
		}
	}
#undef LOCAL_OK
#undef BRD_OK
#undef BRD0_OK
#undef BRD1_OK
}

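/*
 * Perform a single route lookup on behalf of a NETLINK_FIB_LOOKUP
 * request: look up frn->fl_addr in table tb and copy the result (or an
 * error in frn->err) back into the same structure.
 */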
static void nl_fib_lookup(struct fib_result_nl *frn, struct fib_table *tb)
{

	struct fib_result res;
	struct flowi4 fl4 = {
		.flowi4_mark = frn->fl_mark,
		.daddr = frn->fl_addr,
		.flowi4_tos = frn->fl_tos,
		.flowi4_scope = frn->fl_scope,
	};

#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r = NULL;
#endif

	frn->err = -ENOENT;
	if (tb) {
		local_bh_disable();

		frn->tb_id = tb->tb_id;
		rcu_read_lock();
		frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);

		if (!frn->err) {
			frn->prefixlen = res.prefixlen;
			frn->nh_sel = res.nh_sel;
			frn->type = res.type;
			frn->scope = res.scope;
		}
		rcu_read_unlock();
		local_bh_enable();
	}
}

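/*
 * nl_fib_input() services the NETLINK_FIB_LOOKUP protocol: userspace
 * sends a netlink message whose payload is a struct fib_result_nl with
 * the query fields (tb_id_in, fl_addr, fl_tos, ...) filled in, and the
 * same skb is unicast back with err and the result fields filled in.
 * Illustrative userspace sketch (not part of this file):
 *
 *	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_FIB_LOOKUP);
 *	... build an nlmsghdr + struct fib_result_nl, set tb_id_in and
 *	    fl_addr ...
 *	send(fd, buf, len, 0);
 *	recv(fd, buf, sizeof(buf), 0);	(same layout, err/type filled in)
 */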
static void nl_fib_input(struct sk_buff *skb)
{
	struct net *net;
	struct fib_result_nl *frn;
	struct nlmsghdr *nlh;
	struct fib_table *tb;
	u32 pid;

	net = sock_net(skb->sk);
	nlh = nlmsg_hdr(skb);
	if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len ||
	    nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn)))
		return;

	skb = skb_clone(skb, GFP_KERNEL);
	if (skb == NULL)
		return;
	nlh = nlmsg_hdr(skb);

	frn = (struct fib_result_nl *) NLMSG_DATA(nlh);
	tb = fib_get_table(net, frn->tb_id_in);

	nl_fib_lookup(frn, tb);

	pid = NETLINK_CB(skb).pid;	/* pid of sending process */
	NETLINK_CB(skb).pid = 0;	/* from kernel */
	NETLINK_CB(skb).dst_group = 0;	/* unicast */
	netlink_unicast(net->ipv4.fibnl, skb, pid, MSG_DONTWAIT);
}

static int __net_init nl_fib_lookup_init(struct net *net)
{
	struct sock *sk;
	struct netlink_kernel_cfg cfg = {
		.input	= nl_fib_input,
	};

	sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, THIS_MODULE, &cfg);
	if (sk == NULL)
		return -EAFNOSUPPORT;
	net->ipv4.fibnl = sk;
	return 0;
}

static void nl_fib_lookup_exit(struct net *net)
{
	netlink_kernel_release(net->ipv4.fibnl);
	net->ipv4.fibnl = NULL;
}

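/*
 * Tear down IPv4 routing state for a device.  The force value is handed
 * to fib_sync_down_dev(); the callers below use 0 for an administrative
 * down, 1 when the last address is removed and 2 when the device is
 * being unregistered.  delay is passed straight to rt_cache_flush().
 */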
static void fib_disable_ip(struct net_device *dev, int force, int delay)
{
	if (fib_sync_down_dev(dev, force))
		fib_flush(dev_net(dev));
	rt_cache_flush(dev_net(dev), delay);
	arp_ifdown(dev);
}

static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;
	struct net *net = dev_net(dev);

	switch (event) {
	case NETDEV_UP:
		fib_add_ifaddr(ifa);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		fib_sync_up(dev);
#endif
		atomic_inc(&net->ipv4.dev_addr_genid);
		rt_cache_flush(dev_net(dev), -1);
		break;
	case NETDEV_DOWN:
		fib_del_ifaddr(ifa, NULL);
		atomic_inc(&net->ipv4.dev_addr_genid);
		if (ifa->ifa_dev->ifa_list == NULL) {
			/* Last address was deleted from this interface.
			 * Disable IP.
			 */
			fib_disable_ip(dev, 1, 0);
		} else {
			rt_cache_flush(dev_net(dev), -1);
		}
		break;
	}
	return NOTIFY_DONE;
}

static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct in_device *in_dev = __in_dev_get_rtnl(dev);
	struct net *net = dev_net(dev);

	if (event == NETDEV_UNREGISTER) {
		fib_disable_ip(dev, 2, -1);
		return NOTIFY_DONE;
	}

	if (!in_dev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		for_ifa(in_dev) {
			fib_add_ifaddr(ifa);
		} endfor_ifa(in_dev);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		fib_sync_up(dev);
#endif
		atomic_inc(&net->ipv4.dev_addr_genid);
		rt_cache_flush(dev_net(dev), -1);
		break;
	case NETDEV_DOWN:
		fib_disable_ip(dev, 0, 0);
		break;
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGE:
		rt_cache_flush(dev_net(dev), 0);
		break;
	case NETDEV_UNREGISTER_BATCH:
		/* The batch unregister is only called on the first
		 * device in the list of devices being unregistered.
		 * Therefore we should not pass dev_net(dev) in here.
		 */
		rt_cache_flush_batch(NULL);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block fib_inetaddr_notifier = {
	.notifier_call = fib_inetaddr_event,
};

static struct notifier_block fib_netdev_notifier = {
	.notifier_call = fib_netdev_event,
};

static int __net_init ip_fib_net_init(struct net *net)
{
	int err;
	size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;

	/* Avoid false sharing: use at least a full cache line */
	size = max_t(size_t, size, L1_CACHE_BYTES);

	net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
	if (net->ipv4.fib_table_hash == NULL)
		return -ENOMEM;

	err = fib4_rules_init(net);
	if (err < 0)
		goto fail;
	return 0;

fail:
	kfree(net->ipv4.fib_table_hash);
	return err;
}

static void ip_fib_net_exit(struct net *net)
{
	unsigned int i;

#ifdef CONFIG_IP_MULTIPLE_TABLES
	fib4_rules_exit(net);
#endif

	rtnl_lock();
	for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
		struct fib_table *tb;
		struct hlist_head *head;
		struct hlist_node *node, *tmp;

		head = &net->ipv4.fib_table_hash[i];
		hlist_for_each_entry_safe(tb, node, tmp, head, tb_hlist) {
			hlist_del(node);
			fib_table_flush(tb);
			fib_free_table(tb);
		}
	}
	rtnl_unlock();
	kfree(net->ipv4.fib_table_hash);
}

static int __net_init fib_net_init(struct net *net)
{
	int error;

	error = ip_fib_net_init(net);
	if (error < 0)
		goto out;
	error = nl_fib_lookup_init(net);
	if (error < 0)
		goto out_nlfl;
	error = fib_proc_init(net);
	if (error < 0)
		goto out_proc;
out:
	return error;

out_proc:
	nl_fib_lookup_exit(net);
out_nlfl:
	ip_fib_net_exit(net);
	goto out;
}

static void __net_exit fib_net_exit(struct net *net)
{
	fib_proc_exit(net);
	nl_fib_lookup_exit(net);
	ip_fib_net_exit(net);
}

static struct pernet_operations fib_net_ops = {
	.init = fib_net_init,
	.exit = fib_net_exit,
};

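/*
 * Boot-time initialisation: register the rtnetlink handlers for IPv4
 * route add/delete/dump, the per-namespace setup/teardown hooks, the
 * netdevice and inetaddr notifiers, and finally the FIB trie itself.
 */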
void __init ip_fib_init(void)
{
	rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
	rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
	rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);

	register_pernet_subsys(&fib_net_ops);
	register_netdevice_notifier(&fib_netdev_notifier);
	register_inetaddr_notifier(&fib_inetaddr_notifier);

	fib_trie_init();
}