/*
 * net/sched/act_api.c	Packet action API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>
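
/* Actions whose tcfa_action encodes TC_ACT_GOTO_CHAIN carry the destination
 * chain index in the low TC_ACT_EXT_VAL_MASK bits.  The helpers below resolve
 * that index to a tcf_chain at init time, drop the chain reference on
 * teardown and, at execution time, point the classification result at the
 * chain's filter list.
 */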
static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
{
	u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;

	if (!tp)
		return -EINVAL;
	a->goto_chain = tcf_chain_get(tp->chain->block, chain_index, true);
	if (!a->goto_chain)
		return -ENOMEM;
	return 0;
}
static void tcf_action_goto_chain_fini(struct tc_action *a)
{
	tcf_chain_put(a->goto_chain);
}
static void tcf_action_goto_chain_exec(const struct tc_action *a,
				       struct tcf_result *res)
{
	const struct tcf_chain *chain = a->goto_chain;

	res->goto_tp = rcu_dereference_bh(chain->filter_chain);
}
/* XXX: For standalone actions, we don't need an RCU grace period either,
 * because actions are always connected to filters and filters are already
 * destroyed in RCU callbacks, so after an RCU grace period actions are
 * already disconnected from filters.  Readers can no longer find us.
 */
static void free_tcf(struct tc_action *p)
{
	free_percpu(p->cpu_bstats);
	free_percpu(p->cpu_qstats);

	if (p->act_cookie) {
		kfree(p->act_cookie->data);
		kfree(p->act_cookie);
	}
	if (p->goto_chain)
		tcf_action_goto_chain_fini(p);

	kfree(p);
}
static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
{
	spin_lock_bh(&idrinfo->lock);
	idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
	gen_kill_estimator(&p->tcfa_rate_est);
	free_tcf(p);
}
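
/* An action is freed only once both its reference count and its bind count
 * have dropped to zero.  Callers pass bind to drop a binding, and strict to
 * refuse deleting an action that is still bound to a classifier.
 */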
int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfa_bindcnt--;
		else if (strict && p->tcfa_bindcnt > 0)
			return -EPERM;

		p->tcfa_refcnt--;
		if (p->tcfa_bindcnt <= 0 && p->tcfa_refcnt <= 0) {
			if (p->ops->cleanup)
				p->ops->cleanup(p, bind);
			tcf_idr_remove(p->idrinfo, p);
			ret = ACT_P_DELETED;
		}
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_idr_release);
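
/* Dump up to TCA_ACT_MAX_PRIO actions of one kind into skb, resuming from
 * cb->args[0].  cb->args[2] carries the TCA_ROOT_FLAGS value and cb->args[3]
 * the "used within the last N msecs" cutoff set up by tc_dump_action().
 */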
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			   struct netlink_callback *cb)
{
	int err = 0, index = -1, s_i = 0, n_i = 0;
	u32 act_flags = cb->args[2];
	unsigned long jiffy_since = cb->args[3];
	struct nlattr *nest;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	spin_lock_bh(&idrinfo->lock);

	s_i = cb->args[0];

	idr_for_each_entry_ext(idr, p, id) {
		index++;
		if (index < s_i)
			continue;

		if (jiffy_since &&
		    time_after(jiffy_since,
			       (unsigned long)p->tcfa_tm.lastuse))
			continue;

		nest = nla_nest_start(skb, n_i);
		if (!nest)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, p, 0, 0);
		if (err < 0) {
			index--;
			nlmsg_trim(skb, nest);
			goto done;
		}
		nla_nest_end(skb, nest);
		n_i++;
		if (!(act_flags & TCA_FLAG_LARGE_DUMP_ON) &&
		    n_i >= TCA_ACT_MAX_PRIO)
			goto done;
	}
done:
	if (index >= 0)
		cb->args[0] = index + 1;

	spin_unlock_bh(&idrinfo->lock);
	if (n_i) {
		if (act_flags & TCA_FLAG_LARGE_DUMP_ON)
			cb->args[1] = n_i;
	}
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}
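
/* Flush walker: release every action of this kind and report the number of
 * deleted entries as TCA_FCNT inside a nested attribute.
 */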
static int tcf_del_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
			  const struct tc_action_ops *ops)
{
	struct nlattr *nest;
	int n_i = 0;
	int ret = -EINVAL;
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	unsigned long id = 1;

	nest = nla_nest_start(skb, 0);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, ops->kind))
		goto nla_put_failure;

	idr_for_each_entry_ext(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED) {
			module_put(ops->owner);
			n_i++;
		} else if (ret < 0) {
			goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}
int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
		       struct netlink_callback *cb, int type,
		       const struct tc_action_ops *ops)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	if (type == RTM_DELACTION) {
		return tcf_del_walker(idrinfo, skb, ops);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(idrinfo, skb, cb);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
EXPORT_SYMBOL(tcf_generic_walker);
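
/* Index-based lookup helpers.  tcf_idr_search() only reports existence, while
 * tcf_idr_check() additionally takes a reference (and a binding when bind is
 * set) on the action it finds.
 */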
static struct tc_action *tcf_idr_lookup(u32 index, struct tcf_idrinfo *idrinfo)
{
	struct tc_action *p = NULL;

	spin_lock_bh(&idrinfo->lock);
	p = idr_find_ext(&idrinfo->action_idr, index);
	spin_unlock_bh(&idrinfo->lock);

	return p;
}
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p = tcf_idr_lookup(index, idrinfo);

	if (p) {
		*a = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_idr_search);
bool tcf_idr_check(struct tc_action_net *tn, u32 index, struct tc_action **a,
		   int bind)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct tc_action *p = tcf_idr_lookup(index, idrinfo);

	if (index && p) {
		if (bind)
			p->tcfa_bindcnt++;
		p->tcfa_refcnt++;
		*a = p;
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcf_idr_check);
void tcf_idr_cleanup(struct tc_action *a, struct nlattr *est)
{
	if (est)
		gen_kill_estimator(&a->tcfa_rate_est);
	free_tcf(a);
}
EXPORT_SYMBOL(tcf_idr_cleanup);
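
/* Action creation is split in two steps: tcf_idr_create() reserves the index
 * by inserting a NULL entry into the per-netns IDR, and tcf_idr_insert()
 * later publishes the fully initialized action by replacing that placeholder.
 * This keeps half-constructed actions invisible to lookups.
 */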
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
		   struct tc_action **a, const struct tc_action_ops *ops,
		   int bind, bool cpustats)
{
	struct tc_action *p = kzalloc(ops->size, GFP_KERNEL);
	struct tcf_idrinfo *idrinfo = tn->idrinfo;
	struct idr *idr = &idrinfo->action_idr;
	int err = -ENOMEM;
	unsigned long idr_index;

	if (unlikely(!p))
		return -ENOMEM;
	p->tcfa_refcnt = 1;
	if (bind)
		p->tcfa_bindcnt = 1;

	if (cpustats) {
		p->cpu_bstats = netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
		if (!p->cpu_bstats) {
err1:
			kfree(p);
			return err;
		}
		p->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
		if (!p->cpu_qstats) {
err2:
			free_percpu(p->cpu_bstats);
			goto err1;
		}
	}
	spin_lock_init(&p->tcfa_lock);
	/* user doesn't specify an index */
	if (!index) {
		idr_preload(GFP_KERNEL);
		spin_lock_bh(&idrinfo->lock);
		err = idr_alloc_ext(idr, NULL, &idr_index, 1, 0,
				    GFP_ATOMIC);
		spin_unlock_bh(&idrinfo->lock);
		idr_preload_end();
		if (err) {
err3:
			free_percpu(p->cpu_qstats);
			goto err2;
		}
		p->tcfa_index = idr_index;
	} else {
		idr_preload(GFP_KERNEL);
		spin_lock_bh(&idrinfo->lock);
		err = idr_alloc_ext(idr, NULL, NULL, index, index + 1,
				    GFP_ATOMIC);
		spin_unlock_bh(&idrinfo->lock);
		idr_preload_end();
		if (err)
			goto err3;
		p->tcfa_index = index;
	}

	p->tcfa_tm.install = jiffies;
	p->tcfa_tm.lastuse = jiffies;
	p->tcfa_tm.firstuse = 0;
	if (est) {
		err = gen_new_estimator(&p->tcfa_bstats, p->cpu_bstats,
					&p->tcfa_rate_est,
					&p->tcfa_lock, NULL, est);
		if (err)
			goto err3;
	}

	p->idrinfo = idrinfo;
	p->ops = ops;
	INIT_LIST_HEAD(&p->list);
	*a = p;
	return 0;
}
EXPORT_SYMBOL(tcf_idr_create);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a)
{
	struct tcf_idrinfo *idrinfo = tn->idrinfo;

	spin_lock_bh(&idrinfo->lock);
	idr_replace_ext(&idrinfo->action_idr, a, a->tcfa_index);
	spin_unlock_bh(&idrinfo->lock);
}
EXPORT_SYMBOL(tcf_idr_insert);
void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
			 struct tcf_idrinfo *idrinfo)
{
	struct idr *idr = &idrinfo->action_idr;
	struct tc_action *p;
	int ret;
	unsigned long id = 1;

	idr_for_each_entry_ext(idr, p, id) {
		ret = __tcf_idr_release(p, false, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return;
	}
	idr_destroy(&idrinfo->action_idr);
}
EXPORT_SYMBOL(tcf_idrinfo_destroy);
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
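
/* Registration of an action kind.  A typical action module fills in a
 * tc_action_ops and registers it together with its pernet ops from module
 * init.  Illustrative sketch only; the "foo" names (TCA_ACT_FOO,
 * struct tcf_foo, tcf_foo_*, foo_net_ops) are hypothetical, while the ops
 * fields shown are the ones checked and used in this file:
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.type	= TCA_ACT_FOO,
 *		.owner	= THIS_MODULE,
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *		.walk	= tcf_foo_walker,
 *		.lookup	= tcf_foo_search,
 *		.size	= sizeof(struct tcf_foo),
 *	};
 *
 *	return tcf_register_action(&act_foo_ops, &foo_net_ops);
 */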
int tcf_register_action(struct tc_action_ops *act,
			struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int ret;

	if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
		return -EINVAL;

	/* We have to register pernet ops before making the action ops visible,
	 * otherwise tcf_action_init_1() could get a partially initialized
	 * net structure.
	 */
	ret = register_pernet_subsys(ops);
	if (ret)
		return ret;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			unregister_pernet_subsys(ops);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);

	return 0;
}
EXPORT_SYMBOL(tcf_register_action);
int tcf_unregister_action(struct tc_action_ops *act,
			  struct pernet_operations *ops)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	if (!err)
		unregister_pernet_subsys(ops);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);
/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
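
/* tcf_action_exec() runs a filter's action array as a small pipeline: each
 * action returns a verdict, TC_ACT_PIPE continues with the next action,
 * TC_ACT_REPEAT re-runs the current one, and the extended TC_ACT_JUMP verdict
 * encodes how many following actions to skip in its low bits.  jmp_ttl bounds
 * the number of restarts so a faulty jump graph cannot loop forever.
 */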
/* TCA_ACT_MAX_PRIO is 32, so jump offsets count up to 32 */
#define TCA_ACT_MAX_PRIO_MASK 0x1FF
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
		    int nr_actions, struct tcf_result *res)
{
	int ret = -1, i;
	u32 jmp_prgcnt = 0;
	u32 jmp_ttl = TCA_ACT_MAX_PRIO; /* matches actions per filter */

	if (skb_skip_tc_classify(skb))
		return TC_ACT_OK;

restart_act_graph:
	for (i = 0; i < nr_actions; i++) {
		const struct tc_action *a = actions[i];

		if (jmp_prgcnt > 0) {
			jmp_prgcnt -= 1;
			continue;
		}
repeat:
		ret = a->ops->act(skb, a, res);
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */

		if (TC_ACT_EXT_CMP(ret, TC_ACT_JUMP)) {
			jmp_prgcnt = ret & TCA_ACT_MAX_PRIO_MASK;
			if (!jmp_prgcnt || (jmp_prgcnt > nr_actions)) {
				/* faulty opcode, stop pipeline */
				return TC_ACT_OK;
			}

			jmp_ttl -= 1;
			if (jmp_ttl > 0)
				goto restart_act_graph;
			else	/* faulty graph, stop pipeline */
				return TC_ACT_OK;
		} else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
			tcf_action_goto_chain_exec(a, res);
		}

		if (ret != TC_ACT_PIPE)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
int tcf_action_destroy(struct list_head *actions, int bind)
{
	const struct tc_action_ops *ops;
	struct tc_action *a, *tmp;
	int ret = 0;

	list_for_each_entry_safe(a, tmp, actions, list) {
		ops = a->ops;
		ret = __tcf_idr_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(ops->owner);
		else if (ret < 0)
			return ret;
	}
	return ret;
}
int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}
int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	if (a->act_cookie) {
		if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
			    a->act_cookie->data))
			goto nla_put_failure;
	}

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);
int tcf_action_dump(struct sk_buff *skb, struct list_head *actions,
		    int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	list_for_each_entry(a, actions, list) {
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}
static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
{
	struct tc_cookie *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
	if (!c->data) {
		kfree(c);
		return NULL;
	}
	c->len = nla_len(tb[TCA_ACT_COOKIE]);

	return c;
}
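
/* Create (or look up) a single action from one nested netlink attribute:
 * resolve the kind, autoload the act_<kind> module if necessary, call the
 * kind's init() callback, attach an optional user cookie, and set up
 * goto_chain bookkeeping for TC_ACT_GOTO_CHAIN verdicts.
 */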
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
				    struct nlattr *nla, struct nlattr *est,
				    char *name, int ovr, int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	struct tc_cookie *cookie = NULL;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
		if (tb[TCA_ACT_COOKIE]) {
			int cklen = nla_len(tb[TCA_ACT_COOKIE]);

			if (cklen > TC_COOKIE_MAX_SIZE)
				goto err_out;

			cookie = nla_memdup_cookie(tb);
			if (!cookie) {
				err = -ENOMEM;
				goto err_out;
			}
		}
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind);
	else
		err = a_o->init(net, nla, est, &a, ovr, bind);
	if (err < 0)
		goto err_mod;

	if (name == NULL && tb[TCA_ACT_COOKIE]) {
		if (a->act_cookie) {
			kfree(a->act_cookie->data);
			kfree(a->act_cookie);
		}
		a->act_cookie = cookie;
	}

	/* module count goes up only when a brand new policy is created;
	 * if it exists and is only bound to in a_o->init() then
	 * ACT_P_CREATED is not returned (a zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
		err = tcf_action_goto_chain_init(a, tp);
		if (err) {
			LIST_HEAD(actions);

			list_add_tail(&a->list, &actions);
			tcf_action_destroy(&actions, bind);
			return ERR_PTR(err);
		}
	}

	return a;

err_mod:
	module_put(a_o->owner);
err_out:
	if (cookie) {
		kfree(cookie->data);
		kfree(cookie);
	}
	return ERR_PTR(err);
}
static void cleanup_a(struct list_head *actions, int ovr)
{
	struct tc_action *a;

	if (!ovr)
		return;

	list_for_each_entry(a, actions, list)
		a->tcfa_refcnt--;
}
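
/* Parse a TCA_ACT_TAB style attribute containing up to TCA_ACT_MAX_PRIO
 * nested actions, instantiate each one via tcf_action_init_1() and collect
 * them, in order, on the caller supplied list.
 */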
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr, int bind,
		    struct list_head *actions)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tp, tb[i], est, name, ovr, bind);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		if (ovr)
			act->tcfa_refcnt++;
		list_add_tail(&act->list, actions);
	}

	/* Remove the temp refcnt which was necessary to protect against
	 * destroying an existing action which was being replaced
	 */
	cleanup_a(actions, ovr);
	return 0;

err:
	tcf_action_destroy(actions, bind);
	return err;
}
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *p,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (p->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
							   TCA_STATS,
							   TCA_XSTATS,
							   &p->tcfa_lock, &d,
							   TCA_PAD);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfa_lock, &d, TCA_ACT_PAD);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->tcfa_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfa_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, p->cpu_qstats,
				  &p->tcfa_qstats,
				  p->tcfa_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}
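
/* Netlink notification helpers: tca_get_fill() builds a tcamsg header plus a
 * nested TCA_ACT_TAB dump of the given actions, and the tcf_*_notify()
 * wrappers below send the result to the requesting socket or to the
 * RTNLGRP_TC multicast group.
 */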
static int tca_get_fill(struct sk_buff *skb, struct list_head *actions,
			u32 portid, u32 seq, u16 flags, int event, int bind,
			int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;

	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}
static int
tcf_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct list_head *actions, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event,
			 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}
static struct tc_action *tcf_action_get_1(struct net *net, struct nlattr *nla,
					   struct nlmsghdr *n, u32 portid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -EINVAL;
	ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (!ops) /* could happen in batch of actions */
		goto err_out;
	err = -ENOENT;
	if (ops->lookup(net, &a, index) == 0)
		goto err_mod;

	module_put(ops->owner);
	return a;

err_mod:
	module_put(ops->owner);
err_out:
	return ERR_PTR(err);
}
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	const struct tc_action_ops *ops;
	struct nlattr *kind;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	ops = tc_lookup_action(kind);
	if (!ops) /* someone is trying to flush an unknown action */
		goto err_out;

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION,
			sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (!nest)
		goto out_module_put;

	err = ops->walk(net, skb, &dcb, RTM_DELACTION, ops);
	if (err <= 0)
		goto out_module_put;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

out_module_put:
	module_put(ops->owner);
err_out:
	kfree_skb(skb);
	return err;
}
static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_destroy(actions, 0);
	if (ret < 0) {
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}
static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	LIST_HEAD(actions);

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(net, tb[1], n, portid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(net, tb[i], n, portid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, &actions);
	}

	if (event == RTM_GETACTION)
		ret = tcf_get_notify(net, portid, n, &actions, event);
	else { /* delete */
		ret = tcf_del_notify(net, n, &actions, portid);
		if (ret)
			goto err;
		return ret;
	}
err:
	if (event != RTM_GETACTION)
		tcf_action_destroy(&actions, 0);
	return ret;
}
static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}
static int tcf_action_add(struct net *net, struct nlattr *nla,
			  struct nlmsghdr *n, u32 portid, int ovr)
{
	int ret = 0;
	LIST_HEAD(actions);

	ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, &actions);
	if (ret)
		return ret;

	return tcf_add_notify(net, n, &actions, portid);
}
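
/* Policy for the root (tcamsg level) attributes of RTM_*ACTION requests:
 * TCA_ROOT_FLAGS is a bitfield32 restricted to TCA_FLAG_LARGE_DUMP_ON, and
 * TCA_ROOT_TIME_DELTA limits dumps to actions used within the last N msecs.
 */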
static u32 tcaa_root_flags_allowed = TCA_FLAG_LARGE_DUMP_ON;
static const struct nla_policy tcaa_policy[TCA_ROOT_MAX + 1] = {
	[TCA_ROOT_FLAGS] = { .type = NLA_BITFIELD32,
			     .validation_data = &tcaa_root_flags_allowed },
	[TCA_ROOT_TIME_DELTA] = { .type = NLA_U32 },
};
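
/* rtnetlink doit handler for RTM_NEWACTION, RTM_DELACTION and RTM_GETACTION.
 * From userspace these correspond to "tc actions add/change/replace",
 * "tc actions del" and "tc actions get" respectively (assumed mapping based
 * on the message types handled below).
 */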
static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ROOT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ROOT_MAX, NULL,
			  extack);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* We are going to assume that any other flags imply
		 * create-only-if-it-doesn't-exist.  Note that CREATE | EXCL
		 * implies that, but since we want to avoid ambiguity (e.g.
		 * when flags are zero) we just set ovr here.
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}
static struct nlattr *find_dump_kind(struct nlattr **nla)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *kind;

	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL, NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse_nested(tb2, TCA_ACT_MAX, tb[1], NULL, NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}
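
/* rtnetlink dumpit handler (the "tc actions ls ..." path): extract the
 * requested kind from the request, then let that kind's walk() callback fill
 * the reply.  The number of actions dumped is reported back in
 * TCA_ROOT_COUNT so userspace can iterate large dumps.
 */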
static int tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *tb[TCA_ROOT_MAX + 1];
	struct nlattr *count_attr = NULL;
	unsigned long jiffy_since = 0;
	struct nlattr *kind = NULL;
	struct nla_bitfield32 bf;
	u32 msecs_since = 0;
	u32 act_count = 0;

	ret = nlmsg_parse(cb->nlh, sizeof(struct tcamsg), tb, TCA_ROOT_MAX,
			  tcaa_policy, NULL);
	if (ret < 0)
		return ret;

	kind = find_dump_kind(tb);
	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	if (tb[TCA_ROOT_FLAGS]) {
		bf = nla_get_bitfield32(tb[TCA_ROOT_FLAGS]);
		cb->args[2] = bf.value;
	}

	if (tb[TCA_ROOT_TIME_DELTA]) {
		msecs_since = nla_get_u32(tb[TCA_ROOT_TIME_DELTA]);
	}

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;

	if (msecs_since)
		jiffy_since = jiffies - msecs_to_jiffies(msecs_since);

	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;
	cb->args[3] = jiffy_since;
	count_attr = nla_reserve(skb, TCA_ROOT_COUNT, sizeof(u32));
	if (!count_attr)
		goto out_module_put;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(net, skb, cb, RTM_GETACTION, a_o);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
		act_count = cb->args[1];
		memcpy(nla_data(count_attr), &act_count, sizeof(u32));
		cb->args[1] = 0;
	} else
		nlmsg_trim(skb, b);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}
static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      0);

	return 0;
}

subsys_initcall(tc_action_init);