/*
 * net/core/fib_rules.c		Generic Routing Rules
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/fib_rules.h>

int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;
	r->fr_net = hold_net(ops->fro_net);

	r->suppress_prefixlen = -1;
	r->suppress_ifgroup = -1;
	/* The lock is not required here, the list is unreachable
	 * at the moment this function is called */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}
EXPORT_SYMBOL(fib_default_rule_add);

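/*
 * Usage sketch (illustrative; it mirrors what net/ipv4/fib_rules.c does
 * at registration time): a protocol seeds its rule list with the
 * well-known defaults before any netlink traffic can reach it:
 *
 *	err = fib_default_rule_add(ops, 0, RT_TABLE_LOCAL, 0);
 *	if (err < 0)
 *		return err;
 *	err = fib_default_rule_add(ops, 0x7FFE, RT_TABLE_MAIN, 0);
 *	if (err < 0)
 *		return err;
 *	err = fib_default_rule_add(ops, 0x7FFF, RT_TABLE_DEFAULT, 0);
 */
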
u32 fib_default_rule_pref(struct fib_rules_ops *ops)
{
	struct list_head *pos;
	struct fib_rule *rule;

	if (!list_empty(&ops->rules_list)) {
		pos = ops->rules_list.next;
		if (pos->next != &ops->rules_list) {
			rule = list_entry(pos->next, struct fib_rule, list);
			if (rule->pref)
				return rule->pref - 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(fib_default_rule_pref);

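/*
 * Worked example (assuming the three IPv4 defaults sketched above): with
 * rules at preferences 0, 0x7FFE and 0x7FFF, the second list entry has
 * pref 0x7FFE, so the next rule added without an explicit priority gets
 * pref 0x7FFD and sorts just in front of the main-table rule.
 */
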
static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid);

static struct fib_rules_ops *lookup_rules_ops(struct net *net, int family)
{
	struct fib_rules_ops *ops;

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (ops->family == family) {
			if (!try_module_get(ops->owner))
				ops = NULL;
			rcu_read_unlock();
			return ops;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static void rules_ops_put(struct fib_rules_ops *ops)
{
	if (ops)
		module_put(ops->owner);
}

static void flush_route_cache(struct fib_rules_ops *ops)
{
	if (ops->flush_cache)
		ops->flush_cache(ops);
}

static int __fib_rules_register(struct fib_rules_ops *ops)
{
	int err = -EEXIST;
	struct fib_rules_ops *o;
	struct net *net;

	net = ops->fro_net;

	if (ops->rule_size < sizeof(struct fib_rule))
		return -EINVAL;

	if (ops->match == NULL || ops->configure == NULL ||
	    ops->compare == NULL || ops->fill == NULL ||
	    ops->action == NULL)
		return -EINVAL;

	spin_lock(&net->rules_mod_lock);
	list_for_each_entry(o, &net->rules_ops, list)
		if (ops->family == o->family)
			goto errout;

	hold_net(net);
	list_add_tail_rcu(&ops->list, &net->rules_ops);
	err = 0;
errout:
	spin_unlock(&net->rules_mod_lock);

	return err;
}

struct fib_rules_ops *
fib_rules_register(const struct fib_rules_ops *tmpl, struct net *net)
{
	struct fib_rules_ops *ops;
	int err;

	ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->rules_list);
	ops->fro_net = net;

	err = __fib_rules_register(ops);
	if (err) {
		kfree(ops);
		ops = ERR_PTR(err);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(fib_rules_register);

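/*
 * Registration sketch (field values are illustrative; the callback names
 * follow the IPv4 template in net/ipv4/fib_rules.c): a protocol fills in
 * a static template and clones it into each namespace:
 *
 *	static const struct fib_rules_ops fib4_rules_ops_template = {
 *		.family		= AF_INET,
 *		.rule_size	= sizeof(struct fib4_rule),
 *		.addr_size	= sizeof(u32),
 *		.action		= fib4_rule_action,
 *		.match		= fib4_rule_match,
 *		.configure	= fib4_rule_configure,
 *		.compare	= fib4_rule_compare,
 *		.fill		= fib4_rule_fill,
 *		...
 *	};
 *
 *	ops = fib_rules_register(&fib4_rules_ops_template, net);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 */
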
static void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
{
	struct fib_rule *rule, *tmp;

	list_for_each_entry_safe(rule, tmp, &ops->rules_list, list) {
		list_del_rcu(&rule->list);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
	}
}

static void fib_rules_put_rcu(struct rcu_head *head)
{
	struct fib_rules_ops *ops = container_of(head, struct fib_rules_ops, rcu);
	struct net *net = ops->fro_net;

	release_net(net);
	kfree(ops);
}

void fib_rules_unregister(struct fib_rules_ops *ops)
{
	struct net *net = ops->fro_net;

	spin_lock(&net->rules_mod_lock);
	list_del_rcu(&ops->list);
	fib_rules_cleanup_ops(ops);
	spin_unlock(&net->rules_mod_lock);

	call_rcu(&ops->rcu, fib_rules_put_rcu);
}
EXPORT_SYMBOL_GPL(fib_rules_unregister);

static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
			  struct flowi *fl, int flags)
{
	int ret = 0;

	if (rule->iifindex && (rule->iifindex != fl->flowi_iif))
		goto out;

	if (rule->oifindex && (rule->oifindex != fl->flowi_oif))
		goto out;

	if ((rule->mark ^ fl->flowi_mark) & rule->mark_mask)
		goto out;

	ret = ops->match(rule, fl, flags);
out:
	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
}

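/*
 * Example of the invert flag: a rule created with "ip rule add not
 * fwmark 1 table 100" carries FIB_RULE_INVERT, so fib_rule_match()
 * computes the ordinary match on the mark and then negates it; the rule
 * selects every packet whose mark is *not* 1.
 */
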
int fib_rules_lookup(struct fib_rules_ops *ops, struct flowi *fl,
		     int flags, struct fib_lookup_arg *arg)
{
	struct fib_rule *rule;
	int err;

	rcu_read_lock();

	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
jumped:
		if (!fib_rule_match(rule, ops, fl, flags))
			continue;

		if (rule->action == FR_ACT_GOTO) {
			struct fib_rule *target;

			target = rcu_dereference(rule->ctarget);
			if (target == NULL) {
				continue;
			} else {
				rule = target;
				goto jumped;
			}
		} else if (rule->action == FR_ACT_NOP)
			continue;
		else
			err = ops->action(rule, fl, flags, arg);

		if (!err && ops->suppress && ops->suppress(rule, arg))
			continue;

		if (err != -EAGAIN) {
			if ((arg->flags & FIB_LOOKUP_NOREF) ||
			    likely(atomic_inc_not_zero(&rule->refcnt))) {
				arg->rule = rule;
				goto out;
			}
			break;
		}
	}

	err = -ESRCH;
out:
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL_GPL(fib_rules_lookup);

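/*
 * Caller sketch (an assumption modeled on the IPv4 fib_lookup() path of
 * this era; the flow structure is per family): the protocol wraps its
 * result pointer in a struct fib_lookup_arg and walks the rules:
 *
 *	struct fib_lookup_arg arg = {
 *		.result	= res,
 *		.flags	= FIB_LOOKUP_NOREF,
 *	};
 *
 *	err = fib_rules_lookup(net->ipv4.rules_ops,
 *			       flowi4_to_flowi(flp), 0, &arg);
 *
 * A rule's ->action() returns -EAGAIN to fall through to the next rule;
 * any other return value terminates the walk.
 */
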
static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
			    struct fib_rules_ops *ops)
{
	int err = -EINVAL;

	if (frh->src_len)
		if (tb[FRA_SRC] == NULL ||
		    frh->src_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_SRC]) != ops->addr_size)
			goto errout;

	if (frh->dst_len)
		if (tb[FRA_DST] == NULL ||
		    frh->dst_len > (ops->addr_size * 8) ||
		    nla_len(tb[FRA_DST]) != ops->addr_size)
			goto errout;

	err = 0;
errout:
	return err;
}

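/*
 * Concrete numbers for IPv4 (addr_size == 4): a source prefix length may
 * be at most 4 * 8 == 32 bits, and FRA_SRC must carry exactly 4 bytes of
 * address. For IPv6 (addr_size == 16) the limits become 128 bits and
 * 16 bytes respectively.
 */
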
static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *r, *last = NULL;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL, unresolved = 0;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	rule = kzalloc(ops->rule_size, GFP_KERNEL);
	if (rule == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	rule->fr_net = hold_net(net);

	if (tb[FRA_PRIORITY])
		rule->pref = nla_get_u32(tb[FRA_PRIORITY]);

	if (tb[FRA_IIFNAME]) {
		struct net_device *dev;

		rule->iifindex = -1;
		nla_strlcpy(rule->iifname, tb[FRA_IIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->iifname);
		if (dev)
			rule->iifindex = dev->ifindex;
	}

	if (tb[FRA_OIFNAME]) {
		struct net_device *dev;

		rule->oifindex = -1;
		nla_strlcpy(rule->oifname, tb[FRA_OIFNAME], IFNAMSIZ);
		dev = __dev_get_by_name(net, rule->oifname);
		if (dev)
			rule->oifindex = dev->ifindex;
	}

	if (tb[FRA_FWMARK]) {
		rule->mark = nla_get_u32(tb[FRA_FWMARK]);
		if (rule->mark)
			/* compatibility: if the mark value is non-zero all bits
			 * are compared unless a mask is explicitly specified.
			 */
			rule->mark_mask = 0xFFFFFFFF;
	}

	if (tb[FRA_FWMASK])
		rule->mark_mask = nla_get_u32(tb[FRA_FWMASK]);

	rule->action = frh->action;
	rule->flags = frh->flags;
	rule->table = frh_get_table(frh, tb);
	if (tb[FRA_SUPPRESS_PREFIXLEN])
		rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
	else
		rule->suppress_prefixlen = -1;

	if (tb[FRA_SUPPRESS_IFGROUP])
		rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
	else
		rule->suppress_ifgroup = -1;

	if (!tb[FRA_PRIORITY] && ops->default_pref)
		rule->pref = ops->default_pref(ops);

	err = -EINVAL;
	if (tb[FRA_GOTO]) {
		if (rule->action != FR_ACT_GOTO)
			goto errout_free;

		rule->target = nla_get_u32(tb[FRA_GOTO]);
		/* Backward jumps are prohibited to avoid endless loops */
		if (rule->target <= rule->pref)
			goto errout_free;

		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->pref == rule->target) {
				RCU_INIT_POINTER(rule->ctarget, r);
				break;
			}
		}

		if (rcu_dereference_protected(rule->ctarget, 1) == NULL)
			unresolved = 1;
	} else if (rule->action == FR_ACT_GOTO)
		goto errout_free;

	err = ops->configure(rule, skb, frh, tb);
	if (err < 0)
		goto errout_free;

	list_for_each_entry(r, &ops->rules_list, list) {
		if (r->pref > rule->pref)
			break;
		last = r;
	}

	fib_rule_get(rule);

	if (last)
		list_add_rcu(&rule->list, &last->list);
	else
		list_add_rcu(&rule->list, &ops->rules_list);

	if (ops->unresolved_rules) {
		/*
		 * There are unresolved goto rules in the list, check if
		 * any of them are pointing to this new rule.
		 */
		list_for_each_entry(r, &ops->rules_list, list) {
			if (r->action == FR_ACT_GOTO &&
			    r->target == rule->pref &&
			    rtnl_dereference(r->ctarget) == NULL) {
				rcu_assign_pointer(r->ctarget, rule);
				if (--ops->unresolved_rules == 0)
					break;
			}
		}
	}

	if (rule->action == FR_ACT_GOTO)
		ops->nr_goto_rules++;

	if (unresolved)
		ops->unresolved_rules++;

	notify_rule_change(RTM_NEWRULE, rule, ops, nlh, NETLINK_CB(skb).portid);
	flush_route_cache(ops);
	rules_ops_put(ops);
	return 0;

errout_free:
	release_net(rule->fr_net);
	kfree(rule);
errout:
	rules_ops_put(ops);
	return err;
}

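/*
 * Userspace view (iproute2 syntax; the attribute mapping is what this
 * handler parses): "ip rule add fwmark 1 table 100 pref 1000" arrives as
 * an RTM_NEWRULE message carrying FRA_FWMARK == 1, FRA_TABLE == 100 and
 * FRA_PRIORITY == 1000. A goto rule such as "ip rule add pref 10 goto 20"
 * sets FRA_GOTO == 20 and is rejected above unless the target preference
 * is strictly greater than the rule's own.
 */
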
static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rule_hdr *frh = nlmsg_data(nlh);
	struct fib_rules_ops *ops = NULL;
	struct fib_rule *rule, *tmp;
	struct nlattr *tb[FRA_MAX+1];
	int err = -EINVAL;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*frh)))
		goto errout;

	ops = lookup_rules_ops(net, frh->family);
	if (ops == NULL) {
		err = -EAFNOSUPPORT;
		goto errout;
	}

	err = nlmsg_parse(nlh, sizeof(*frh), tb, FRA_MAX, ops->policy);
	if (err < 0)
		goto errout;

	err = validate_rulemsg(frh, tb, ops);
	if (err < 0)
		goto errout;

	list_for_each_entry(rule, &ops->rules_list, list) {
		if (frh->action && (frh->action != rule->action))
			continue;

		if (frh_get_table(frh, tb) &&
		    (frh_get_table(frh, tb) != rule->table))
			continue;

		if (tb[FRA_PRIORITY] &&
		    (rule->pref != nla_get_u32(tb[FRA_PRIORITY])))
			continue;

		if (tb[FRA_IIFNAME] &&
		    nla_strcmp(tb[FRA_IIFNAME], rule->iifname))
			continue;

		if (tb[FRA_OIFNAME] &&
		    nla_strcmp(tb[FRA_OIFNAME], rule->oifname))
			continue;

		if (tb[FRA_FWMARK] &&
		    (rule->mark != nla_get_u32(tb[FRA_FWMARK])))
			continue;

		if (tb[FRA_FWMASK] &&
		    (rule->mark_mask != nla_get_u32(tb[FRA_FWMASK])))
			continue;

		if (!ops->compare(rule, frh, tb))
			continue;

		if (rule->flags & FIB_RULE_PERMANENT) {
			err = -EPERM;
			goto errout;
		}

		list_del_rcu(&rule->list);

		if (rule->action == FR_ACT_GOTO) {
			ops->nr_goto_rules--;
			if (rtnl_dereference(rule->ctarget) == NULL)
				ops->unresolved_rules--;
		}

		/*
		 * Check whether this rule is the target of any goto
		 * rules; if so, detach those rules so they become
		 * unresolved. As this operation is potentially very
		 * expensive, it is only performed if goto rules have
		 * actually been added.
		 */
		if (ops->nr_goto_rules > 0) {
			list_for_each_entry(tmp, &ops->rules_list, list) {
				if (rtnl_dereference(tmp->ctarget) == rule) {
					RCU_INIT_POINTER(tmp->ctarget, NULL);
					ops->unresolved_rules++;
				}
			}
		}

		notify_rule_change(RTM_DELRULE, rule, ops, nlh,
				   NETLINK_CB(skb).portid);
		if (ops->delete)
			ops->delete(rule);
		fib_rule_put(rule);
		flush_route_cache(ops);
		rules_ops_put(ops);
		return 0;
	}

	err = -ENOENT;
errout:
	rules_ops_put(ops);
	return err;
}

static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
					 struct fib_rule *rule)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct fib_rule_hdr))
			 + nla_total_size(IFNAMSIZ) /* FRA_IIFNAME */
			 + nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
			 + nla_total_size(4) /* FRA_PRIORITY */
			 + nla_total_size(4) /* FRA_TABLE */
			 + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
			 + nla_total_size(4) /* FRA_FWMARK */
			 + nla_total_size(4); /* FRA_FWMASK */

	if (ops->nlmsg_payload)
		payload += ops->nlmsg_payload(rule);

	return payload;
}

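/*
 * Size check (arithmetic under the usual layouts: sizeof(struct
 * fib_rule_hdr) == 12, IFNAMSIZ == 16, NLA_HDRLEN == 4): the base
 * payload is 12 + 2 * NLA_ALIGN(4 + 16) + 6 * NLA_ALIGN(4 + 4)
 * == 12 + 2 * 20 + 6 * 8 == 100 bytes, before any per-family
 * attributes added via ->nlmsg_payload().
 */
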
static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
			    u32 pid, u32 seq, int type, int flags,
			    struct fib_rules_ops *ops)
{
	struct nlmsghdr *nlh;
	struct fib_rule_hdr *frh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	frh = nlmsg_data(nlh);
	frh->family = ops->family;
	frh->table = rule->table;
	if (nla_put_u32(skb, FRA_TABLE, rule->table))
		goto nla_put_failure;
	if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
		goto nla_put_failure;
	frh->res1 = 0;
	frh->res2 = 0;
	frh->action = rule->action;
	frh->flags = rule->flags;

	if (rule->action == FR_ACT_GOTO &&
	    rcu_access_pointer(rule->ctarget) == NULL)
		frh->flags |= FIB_RULE_UNRESOLVED;

	if (rule->iifname[0]) {
		if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
			goto nla_put_failure;
		if (rule->iifindex == -1)
			frh->flags |= FIB_RULE_IIF_DETACHED;
	}

	if (rule->oifname[0]) {
		if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
			goto nla_put_failure;
		if (rule->oifindex == -1)
			frh->flags |= FIB_RULE_OIF_DETACHED;
	}

	if ((rule->pref &&
	     nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
	    (rule->mark &&
	     nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
	    ((rule->mark_mask || rule->mark) &&
	     nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
	    (rule->target &&
	     nla_put_u32(skb, FRA_GOTO, rule->target)))
		goto nla_put_failure;

	if (rule->suppress_ifgroup != -1) {
		if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
			goto nla_put_failure;
	}

	if (ops->fill(rule, skb, frh) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
		      struct fib_rules_ops *ops)
{
	int idx = 0;
	struct fib_rule *rule;

	rcu_read_lock();
	list_for_each_entry_rcu(rule, &ops->rules_list, list) {
		if (idx < cb->args[1])
			goto skip;

		if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
				     cb->nlh->nlmsg_seq, RTM_NEWRULE,
				     NLM_F_MULTI, ops) < 0)
			break;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[1] = idx;
	rules_ops_put(ops);

	return skb->len;
}

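/*
 * Resumption example: netlink dumps are delivered in chunks, so
 * cb->args[1] records how many rules have already been emitted. If a
 * dump stops after 25 rules because the skb filled up, the next
 * callback invocation skips indices 0..24 and continues at rule 25.
 */
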
static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct fib_rules_ops *ops;
	int idx = 0, family;

	family = rtnl_msg_family(cb->nlh);
	if (family != AF_UNSPEC) {
		/* Protocol specific dump request */
		ops = lookup_rules_ops(net, family);
		if (ops == NULL)
			return -EAFNOSUPPORT;

		return dump_rules(skb, cb, ops);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ops, &net->rules_ops, list) {
		if (idx < cb->args[0] || !try_module_get(ops->owner))
			goto skip;

		if (dump_rules(skb, cb, ops) < 0)
			break;

		cb->args[1] = 0;
skip:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}

static void notify_rule_change(int event, struct fib_rule *rule,
			       struct fib_rules_ops *ops, struct nlmsghdr *nlh,
			       u32 pid)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	net = ops->fro_net;
	skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, pid, ops->nlgroup, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, ops->nlgroup, err);
}

static void attach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == -1 &&
		    strcmp(dev->name, rule->iifname) == 0)
			rule->iifindex = dev->ifindex;
		if (rule->oifindex == -1 &&
		    strcmp(dev->name, rule->oifname) == 0)
			rule->oifindex = dev->ifindex;
	}
}

static void detach_rules(struct list_head *rules, struct net_device *dev)
{
	struct fib_rule *rule;

	list_for_each_entry(rule, rules, list) {
		if (rule->iifindex == dev->ifindex)
			rule->iifindex = -1;
		if (rule->oifindex == dev->ifindex)
			rule->oifindex = -1;
	}
}

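/*
 * Lifecycle example: a rule bound to "eth0" matches by ifindex while
 * the device exists. When eth0 is unregistered, detach_rules() resets
 * the index to -1 (reported as FIB_RULE_IIF_DETACHED in dumps above)
 * and the rule no longer matches any interface; if a device named
 * "eth0" later (re)appears, attach_rules() rebinds it to the new
 * ifindex.
 */
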
static int fib_rules_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct fib_rules_ops *ops;

	ASSERT_RTNL();

	switch (event) {
	case NETDEV_REGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			attach_rules(&ops->rules_list, dev);
		break;

	case NETDEV_CHANGENAME:
		list_for_each_entry(ops, &net->rules_ops, list) {
			detach_rules(&ops->rules_list, dev);
			attach_rules(&ops->rules_list, dev);
		}
		break;

	case NETDEV_UNREGISTER:
		list_for_each_entry(ops, &net->rules_ops, list)
			detach_rules(&ops->rules_list, dev);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};

static int __net_init fib_rules_net_init(struct net *net)
{
	INIT_LIST_HEAD(&net->rules_ops);
	spin_lock_init(&net->rules_mod_lock);
	return 0;
}

static struct pernet_operations fib_rules_net_ops = {
	.init = fib_rules_net_init,
};

static int __init fib_rules_init(void)
{
	int err;
	rtnl_register(PF_UNSPEC, RTM_NEWRULE, fib_nl_newrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELRULE, fib_nl_delrule, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETRULE, NULL, fib_nl_dumprule, NULL);

	err = register_pernet_subsys(&fib_rules_net_ops);
	if (err < 0)
		goto fail;

	err = register_netdevice_notifier(&fib_rules_notifier);
	if (err < 0)
		goto fail_unregister;

	return 0;

fail_unregister:
	unregister_pernet_subsys(&fib_rules_net_ops);
fail:
	rtnl_unregister(PF_UNSPEC, RTM_NEWRULE);
	rtnl_unregister(PF_UNSPEC, RTM_DELRULE);
	rtnl_unregister(PF_UNSPEC, RTM_GETRULE);
	return err;
}

subsys_initcall(fib_rules_init);