]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - net/ipv4/fib_rules.c
Remove obsolete #include <linux/config.h>
[mirror_ubuntu-jammy-kernel.git] / net / ipv4 / fib_rules.c
CommitLineData
1da177e4
LT
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IPv4 Forwarding Information Base: policy rules.
7 *
8 * Version: $Id: fib_rules.c,v 1.17 2001/10/31 21:55:54 davem Exp $
9 *
10 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * Fixes:
18 * Rani Assaf : local_rule cannot be deleted
19 * Marc Boucher : routing by fwmark
20 */
21
1da177e4
LT
22#include <asm/uaccess.h>
23#include <asm/system.h>
24#include <linux/bitops.h>
25#include <linux/types.h>
26#include <linux/kernel.h>
27#include <linux/sched.h>
28#include <linux/mm.h>
29#include <linux/string.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/errno.h>
33#include <linux/in.h>
34#include <linux/inet.h>
14c85021 35#include <linux/inetdevice.h>
1da177e4
LT
36#include <linux/netdevice.h>
37#include <linux/if_arp.h>
38#include <linux/proc_fs.h>
39#include <linux/skbuff.h>
40#include <linux/netlink.h>
41#include <linux/init.h>
7b204afd
RO
42#include <linux/list.h>
43#include <linux/rcupdate.h>
1da177e4
LT
44
45#include <net/ip.h>
46#include <net/protocol.h>
47#include <net/route.h>
48#include <net/tcp.h>
49#include <net/sock.h>
50#include <net/ip_fib.h>
51
/* Debug trace macro: compiled out (expands to nothing) in normal builds. */
#define FRprintk(a...)

/*
 * One IPv4 policy routing rule.  Rules live on the global fib_rules
 * hlist ordered by ascending r_preference; readers walk the list under
 * RCU, writers are netlink handlers running with the RTNL semaphore.
 */
struct fib_rule
{
	struct hlist_node hlist;	/* linkage into fib_rules (RCU-protected) */
	atomic_t	r_clntref;	/* refcount; rule freed via RCU at zero */
	u32		r_preference;	/* priority: lower value matches first */
	unsigned char	r_table;	/* routing table id selected by this rule */
	unsigned char	r_action;	/* RTN_UNICAST/UNREACHABLE/BLACKHOLE/PROHIBIT */
	unsigned char	r_dst_len;	/* destination prefix length in bits */
	unsigned char	r_src_len;	/* source prefix length in bits */
	u32		r_src;		/* source prefix to match */
	u32		r_srcmask;	/* mask derived from r_src_len */
	u32		r_dst;		/* destination prefix to match */
	u32		r_dstmask;	/* mask derived from r_dst_len */
	u32		r_srcmap;	/* source remap value (from RTA_GATEWAY);
					 * NOTE(review): presumably legacy router
					 * NAT -- only reported, never applied here */
	u8		r_flags;	/* rtm_flags copied from the request */
	u8		r_tos;		/* TOS to match; 0 = wildcard */
#ifdef CONFIG_IP_ROUTE_FWMARK
	u32		r_fwmark;	/* netfilter mark to match; 0 = wildcard */
#endif
	int		r_ifindex;	/* input device index; -1 = unresolved */
#ifdef CONFIG_NET_CLS_ROUTE
	__u32		r_tclassid;	/* traffic class id (RTA_FLOW) */
#endif
	char		r_ifname[IFNAMSIZ];	/* input device name to match */
	int		r_dead;		/* set once unlinked; checked before free */
	struct rcu_head	rcu;		/* for deferred kfree via call_rcu() */
};
81
/*
 * The three built-in rules.  Their refcount starts at 2 so that
 * fib_rule_put() can never drop them to zero: they are static objects
 * and must never reach kfree().
 */
static struct fib_rule default_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFF,
	.r_table =	RT_TABLE_DEFAULT,
	.r_action =	RTN_UNICAST,
};

static struct fib_rule main_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFE,
	.r_table =	RT_TABLE_MAIN,
	.r_action =	RTN_UNICAST,
};

/* Preference 0: always consulted first; cannot be deleted (see delrule). */
static struct fib_rule local_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_table =	RT_TABLE_LOCAL,
	.r_action =	RTN_UNICAST,
};

/* Global rule list, kept sorted by ascending r_preference. */
static struct hlist_head fib_rules;

/* Writer functions are netlink handlers and run with the RTNL semaphore held. */

static void rtmsg_rule(int, struct fib_rule *);

1da177e4
LT
/*
 * RTM_DELRULE handler: find the first rule matching every attribute
 * present in the request, unlink it from the list and drop the list's
 * reference.  Runs under the RTNL semaphore (rtnetlink handler).
 *
 * Returns 0 on success, -ESRCH if no rule matched, -EPERM for an
 * attempt to delete the built-in local rule.
 */
int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct fib_rule *r;
	struct hlist_node *node;
	int err = -ESRCH;

	hlist_for_each_entry(r, node, &fib_rules, hlist) {
		/* An attribute absent from the request acts as a wildcard. */
		if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 4) == 0) &&
		    rtm->rtm_src_len == r->r_src_len &&
		    rtm->rtm_dst_len == r->r_dst_len &&
		    (!rta[RTA_DST-1] || memcmp(RTA_DATA(rta[RTA_DST-1]), &r->r_dst, 4) == 0) &&
		    rtm->rtm_tos == r->r_tos &&
#ifdef CONFIG_IP_ROUTE_FWMARK
		    (!rta[RTA_PROTOINFO-1] || memcmp(RTA_DATA(rta[RTA_PROTOINFO-1]), &r->r_fwmark, 4) == 0) &&
#endif
		    (!rtm->rtm_type || rtm->rtm_type == r->r_action) &&
		    (!rta[RTA_PRIORITY-1] || memcmp(RTA_DATA(rta[RTA_PRIORITY-1]), &r->r_preference, 4) == 0) &&
		    (!rta[RTA_IIF-1] || rtattr_strcmp(rta[RTA_IIF-1], r->r_ifname) == 0) &&
		    /* NOTE(review): r cannot be NULL inside the iterator;
		     * the "r &&" below is redundant but harmless. */
		    (!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {
			err = -EPERM;
			/* local_rule must always exist (Rani Assaf fix). */
			if (r == &local_rule)
				break;

			hlist_del_rcu(&r->hlist);
			r->r_dead = 1;	/* permits fib_rule_put() to free it */
			rtmsg_rule(RTM_DELRULE, r);
			fib_rule_put(r);
			err = 0;
			break;
		}
	}
	return err;
}
143
144/* Allocate new unique table id */
145
146static struct fib_table *fib_empty_table(void)
147{
148 int id;
149
150 for (id = 1; id <= RT_TABLE_MAX; id++)
151 if (fib_tables[id] == NULL)
152 return __fib_new_table(id);
153 return NULL;
154}
155
7b204afd
RO
156static inline void fib_rule_put_rcu(struct rcu_head *head)
157{
158 struct fib_rule *r = container_of(head, struct fib_rule, rcu);
159 kfree(r);
160}
161
1da177e4
LT
162void fib_rule_put(struct fib_rule *r)
163{
164 if (atomic_dec_and_test(&r->r_clntref)) {
165 if (r->r_dead)
7b204afd 166 call_rcu(&r->rcu, fib_rule_put_rcu);
1da177e4
LT
167 else
168 printk("Freeing alive rule %p\n", r);
169 }
170}
171
7b204afd
RO
/* Writer function, called from netlink with the RTNL semaphore held. */

/*
 * RTM_NEWRULE handler: validate the request, build a new fib_rule from
 * the rtattr array, and insert it into fib_rules at the position given
 * by its preference (list is kept sorted ascending).
 *
 * Returns 0 on success; -EINVAL on malformed prefix lengths/TOS/IIF,
 * -ENOBUFS when no free table id is left, -ENOMEM on allocation failure.
 */
int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct fib_rule *r, *new_r, *last = NULL;
	struct hlist_node *node = NULL;
	unsigned char table_id;

	/* IPv4 prefixes are at most 32 bits; TOS must fit the TOS mask. */
	if (rtm->rtm_src_len > 32 || rtm->rtm_dst_len > 32 ||
	    (rtm->rtm_tos & ~IPTOS_TOS_MASK))
		return -EINVAL;

	if (rta[RTA_IIF-1] && RTA_PAYLOAD(rta[RTA_IIF-1]) > IFNAMSIZ)
		return -EINVAL;

	table_id = rtm->rtm_table;
	if (table_id == RT_TABLE_UNSPEC) {
		struct fib_table *table;
		/* Auto-allocate a table, but only for unicast rules;
		 * other actions never consult a table. */
		if (rtm->rtm_type == RTN_UNICAST) {
			if ((table = fib_empty_table()) == NULL)
				return -ENOBUFS;
			table_id = table->tb_id;
		}
	}

	new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
	if (!new_r)
		return -ENOMEM;
	memset(new_r, 0, sizeof(*new_r));

	if (rta[RTA_SRC-1])
		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
	if (rta[RTA_DST-1])
		memcpy(&new_r->r_dst, RTA_DATA(rta[RTA_DST-1]), 4);
	if (rta[RTA_GATEWAY-1])
		memcpy(&new_r->r_srcmap, RTA_DATA(rta[RTA_GATEWAY-1]), 4);
	new_r->r_src_len = rtm->rtm_src_len;
	new_r->r_dst_len = rtm->rtm_dst_len;
	new_r->r_srcmask = inet_make_mask(rtm->rtm_src_len);
	new_r->r_dstmask = inet_make_mask(rtm->rtm_dst_len);
	new_r->r_tos = rtm->rtm_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (rta[RTA_PROTOINFO-1])
		memcpy(&new_r->r_fwmark, RTA_DATA(rta[RTA_PROTOINFO-1]), 4);
#endif
	new_r->r_action = rtm->rtm_type;
	new_r->r_flags = rtm->rtm_flags;
	if (rta[RTA_PRIORITY-1])
		memcpy(&new_r->r_preference, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
	new_r->r_table = table_id;
	if (rta[RTA_IIF-1]) {
		struct net_device *dev;
		rtattr_strlcpy(new_r->r_ifname, rta[RTA_IIF-1], IFNAMSIZ);
		new_r->r_ifindex = -1;	/* stays -1 if the device is absent;
					 * resolved later by fib_rules_attach() */
		dev = __dev_get_by_name(new_r->r_ifname);
		if (dev)
			new_r->r_ifindex = dev->ifindex;
	}
#ifdef CONFIG_NET_CLS_ROUTE
	if (rta[RTA_FLOW-1])
		memcpy(&new_r->r_tclassid, RTA_DATA(rta[RTA_FLOW-1]), 4);
#endif
	/* List is never empty: fib_rules_init() installed the built-in
	 * rules, so fib_rules.first is a valid node here. */
	r = container_of(fib_rules.first, struct fib_rule, hlist);

	if (!new_r->r_preference) {
		/* No explicit priority: slot the rule just before the
		 * second entry (i.e. right after local_rule) by taking
		 * that entry's preference minus one. */
		if (r && r->hlist.next != NULL) {
			r = container_of(r->hlist.next, struct fib_rule, hlist);
			if (r->r_preference)
				new_r->r_preference = r->r_preference - 1;
		}
	}

	/* Find the last rule with preference <= ours; insert after it. */
	hlist_for_each_entry(r, node, &fib_rules, hlist) {
		if (r->r_preference > new_r->r_preference)
			break;
		last = r;
	}
	atomic_inc(&new_r->r_clntref);

	if (last)
		hlist_add_after_rcu(&last->hlist, &new_r->hlist);
	else
		/* Everything sorted above us: become the new head,
		 * before the first rule r (the iterator left it there). */
		hlist_add_before_rcu(&new_r->hlist, &r->hlist);

	rtmsg_rule(RTM_NEWRULE, new_r);
	return 0;
}
261
#ifdef CONFIG_NET_CLS_ROUTE
/* Traffic class id of the rule that produced this lookup result (0 if none). */
u32 fib_rules_tclass(struct fib_result *res)
{
	return res->r ? res->r->r_tclassid : 0;
}
#endif
270
7b204afd 271/* callers should hold rtnl semaphore */
1da177e4
LT
272
273static void fib_rules_detach(struct net_device *dev)
274{
7b204afd 275 struct hlist_node *node;
1da177e4
LT
276 struct fib_rule *r;
277
7b204afd
RO
278 hlist_for_each_entry(r, node, &fib_rules, hlist) {
279 if (r->r_ifindex == dev->ifindex)
1da177e4 280 r->r_ifindex = -1;
7b204afd 281
1da177e4
LT
282 }
283}
284
7b204afd
RO
285/* callers should hold rtnl semaphore */
286
1da177e4
LT
287static void fib_rules_attach(struct net_device *dev)
288{
7b204afd 289 struct hlist_node *node;
1da177e4
LT
290 struct fib_rule *r;
291
7b204afd
RO
292 hlist_for_each_entry(r, node, &fib_rules, hlist) {
293 if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
1da177e4 294 r->r_ifindex = dev->ifindex;
1da177e4
LT
295 }
296}
297
/*
 * Policy-routing lookup: walk the rule list (under RCU) in priority
 * order, and for the first rule whose selectors match the flow, act on
 * the rule: terminal actions return an error directly, unicast rules
 * consult their routing table.  A miss (or -EAGAIN) falls through to
 * the next rule.
 *
 * On success res->r holds a referenced pointer to the matching rule
 * (caller releases it via fib_rule_put through fib_res_put paths).
 * Returns 0 on success, -ENETUNREACH/-EINVAL/-EACCES for terminal
 * actions or total miss, or the table lookup's error.
 */
int fib_lookup(const struct flowi *flp, struct fib_result *res)
{
	int err;
	struct fib_rule *r, *policy;
	struct fib_table *tb;
	struct hlist_node *node;

	u32 daddr = flp->fl4_dst;
	u32 saddr = flp->fl4_src;

FRprintk("Lookup: %u.%u.%u.%u <- %u.%u.%u.%u ",
	 NIPQUAD(flp->fl4_dst), NIPQUAD(flp->fl4_src));

	rcu_read_lock();

	hlist_for_each_entry_rcu(r, node, &fib_rules, hlist) {
		/* Selector match: XOR+mask implements prefix comparison;
		 * zero r_tos/r_fwmark/r_ifindex act as wildcards. */
		if (((saddr^r->r_src) & r->r_srcmask) ||
		    ((daddr^r->r_dst) & r->r_dstmask) ||
		    (r->r_tos && r->r_tos != flp->fl4_tos) ||
#ifdef CONFIG_IP_ROUTE_FWMARK
		    (r->r_fwmark && r->r_fwmark != flp->fl4_fwmark) ||
#endif
		    (r->r_ifindex && r->r_ifindex != flp->iif))
			continue;

FRprintk("tb %d r %d ", r->r_table, r->r_action);
		switch (r->r_action) {
		case RTN_UNICAST:
			policy = r;	/* remembered for res->r below */
			break;
		case RTN_UNREACHABLE:
			rcu_read_unlock();
			return -ENETUNREACH;
		default:
		case RTN_BLACKHOLE:
			rcu_read_unlock();
			return -EINVAL;
		case RTN_PROHIBIT:
			rcu_read_unlock();
			return -EACCES;
		}

		/* Rule references a table that does not exist: skip it. */
		if ((tb = fib_get_table(r->r_table)) == NULL)
			continue;
		err = tb->tb_lookup(tb, flp, res);
		if (err == 0) {
			res->r = policy;
			if (policy)
				atomic_inc(&policy->r_clntref);
			rcu_read_unlock();
			return 0;
		}
		/* -EAGAIN means "try the next rule"; anything else is fatal. */
		if (err < 0 && err != -EAGAIN) {
			rcu_read_unlock();
			return err;
		}
	}
FRprintk("FAILURE\n");
	rcu_read_unlock();
	return -ENETUNREACH;
}
359
360void fib_select_default(const struct flowi *flp, struct fib_result *res)
361{
362 if (res->r && res->r->r_action == RTN_UNICAST &&
363 FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) {
364 struct fib_table *tb;
365 if ((tb = fib_get_table(res->r->r_table)) != NULL)
366 tb->tb_select_default(tb, flp, res);
367 }
368}
369
370static int fib_rules_event(struct notifier_block *this, unsigned long event, void *ptr)
371{
372 struct net_device *dev = ptr;
373
374 if (event == NETDEV_UNREGISTER)
375 fib_rules_detach(dev);
376 else if (event == NETDEV_REGISTER)
377 fib_rules_attach(dev);
378 return NOTIFY_DONE;
379}
380
381
/* Registered in fib_rules_init() to track device (un)registration. */
static struct notifier_block fib_rules_notifier = {
	.notifier_call =fib_rules_event,
};
385
/*
 * Serialize one rule into an skb as an RTM_NEWRULE/RTM_DELRULE message.
 * NLMSG_NEW and RTA_PUT jump to the labels below when the skb runs out
 * of room; in that case everything appended so far is trimmed off and
 * -1 is returned.  Returns skb->len on success.
 */
static __inline__ int inet_fill_rule(struct sk_buff *skb,
				     struct fib_rule *r,
				     u32 pid, u32 seq, int event,
				     unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* rollback point on failure */

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = r->r_dst_len;
	rtm->rtm_src_len = r->r_src_len;
	rtm->rtm_tos = r->r_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (r->r_fwmark)
		RTA_PUT(skb, RTA_PROTOINFO, 4, &r->r_fwmark);
#endif
	rtm->rtm_table = r->r_table;
	rtm->rtm_protocol = 0;
	rtm->rtm_scope = 0;
	rtm->rtm_type = r->r_action;
	rtm->rtm_flags = r->r_flags;

	/* Optional attributes: only emit the ones the rule actually set. */
	if (r->r_dst_len)
		RTA_PUT(skb, RTA_DST, 4, &r->r_dst);
	if (r->r_src_len)
		RTA_PUT(skb, RTA_SRC, 4, &r->r_src);
	if (r->r_ifname[0])
		RTA_PUT(skb, RTA_IIF, IFNAMSIZ, &r->r_ifname);
	if (r->r_preference)
		RTA_PUT(skb, RTA_PRIORITY, 4, &r->r_preference);
	if (r->r_srcmap)
		RTA_PUT(skb, RTA_GATEWAY, 4, &r->r_srcmap);
#ifdef CONFIG_NET_CLS_ROUTE
	if (r->r_tclassid)
		RTA_PUT(skb, RTA_FLOW, 4, &r->r_tclassid);
#endif
	nlh->nlmsg_len = skb->tail - b;	/* finalize total message length */
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);	/* undo the partial message */
	return -1;
}
433
7b204afd
RO
434/* callers should hold rtnl semaphore */
435
a5cdc030
PM
436static void rtmsg_rule(int event, struct fib_rule *r)
437{
438 int size = NLMSG_SPACE(sizeof(struct rtmsg) + 128);
439 struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
440
441 if (!skb)
442 netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, ENOBUFS);
443 else if (inet_fill_rule(skb, r, 0, 0, event, 0) < 0) {
444 kfree_skb(skb);
445 netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, EINVAL);
446 } else {
447 netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV4_RULE, GFP_KERNEL);
448 }
449}
450
1da177e4
LT
451int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
452{
7b204afd 453 int idx = 0;
1da177e4
LT
454 int s_idx = cb->args[0];
455 struct fib_rule *r;
7b204afd
RO
456 struct hlist_node *node;
457
458 rcu_read_lock();
459 hlist_for_each_entry(r, node, &fib_rules, hlist) {
1da177e4 460
1da177e4
LT
461 if (idx < s_idx)
462 continue;
a5cdc030
PM
463 if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
464 cb->nlh->nlmsg_seq,
465 RTM_NEWRULE, NLM_F_MULTI) < 0)
1da177e4 466 break;
7b204afd 467 idx++;
1da177e4 468 }
7b204afd 469 rcu_read_unlock();
1da177e4
LT
470 cb->args[0] = idx;
471
472 return skb->len;
473}
474
475void __init fib_rules_init(void)
476{
7b204afd
RO
477 INIT_HLIST_HEAD(&fib_rules);
478 hlist_add_head(&local_rule.hlist, &fib_rules);
479 hlist_add_after(&local_rule.hlist, &main_rule.hlist);
480 hlist_add_after(&main_rule.hlist, &default_rule.hlist);
1da177e4
LT
481 register_netdevice_notifier(&fib_rules_notifier);
482}