/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
16 #include <net/sch_generic.h>
17 #include <net/pkt_cls.h>
19 struct cls_mall_head
{
21 struct tcf_result res
;
27 static int mall_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
28 struct tcf_result
*res
)
30 struct cls_mall_head
*head
= rcu_dereference_bh(tp
->root
);
32 if (tc_skip_sw(head
->flags
))
35 return tcf_exts_exec(skb
, &head
->exts
, res
);
/* ->init: no per-tp state to set up for matchall. */
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}
43 static void mall_destroy_rcu(struct rcu_head
*rcu
)
45 struct cls_mall_head
*head
= container_of(rcu
, struct cls_mall_head
,
48 tcf_exts_destroy(&head
->exts
);
52 static int mall_replace_hw_filter(struct tcf_proto
*tp
,
53 struct cls_mall_head
*head
,
56 struct net_device
*dev
= tp
->q
->dev_queue
->dev
;
57 struct tc_to_netdev offload
;
58 struct tc_cls_matchall_offload mall_offload
= {0};
61 offload
.type
= TC_SETUP_MATCHALL
;
62 offload
.cls_mall
= &mall_offload
;
63 offload
.cls_mall
->command
= TC_CLSMATCHALL_REPLACE
;
64 offload
.cls_mall
->exts
= &head
->exts
;
65 offload
.cls_mall
->cookie
= cookie
;
67 err
= dev
->netdev_ops
->ndo_setup_tc(dev
, tp
->q
->handle
, tp
->protocol
,
70 head
->flags
|= TCA_CLS_FLAGS_IN_HW
;
75 static void mall_destroy_hw_filter(struct tcf_proto
*tp
,
76 struct cls_mall_head
*head
,
79 struct net_device
*dev
= tp
->q
->dev_queue
->dev
;
80 struct tc_to_netdev offload
;
81 struct tc_cls_matchall_offload mall_offload
= {0};
83 offload
.type
= TC_SETUP_MATCHALL
;
84 offload
.cls_mall
= &mall_offload
;
85 offload
.cls_mall
->command
= TC_CLSMATCHALL_DESTROY
;
86 offload
.cls_mall
->exts
= NULL
;
87 offload
.cls_mall
->cookie
= cookie
;
89 dev
->netdev_ops
->ndo_setup_tc(dev
, tp
->q
->handle
, tp
->protocol
,
93 static void mall_destroy(struct tcf_proto
*tp
)
95 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
96 struct net_device
*dev
= tp
->q
->dev_queue
->dev
;
101 if (tc_should_offload(dev
, tp
, head
->flags
))
102 mall_destroy_hw_filter(tp
, head
, (unsigned long) head
);
104 call_rcu(&head
->rcu
, mall_destroy_rcu
);
107 static unsigned long mall_get(struct tcf_proto
*tp
, u32 handle
)
112 static const struct nla_policy mall_policy
[TCA_MATCHALL_MAX
+ 1] = {
113 [TCA_MATCHALL_UNSPEC
] = { .type
= NLA_UNSPEC
},
114 [TCA_MATCHALL_CLASSID
] = { .type
= NLA_U32
},
117 static int mall_set_parms(struct net
*net
, struct tcf_proto
*tp
,
118 struct cls_mall_head
*head
,
119 unsigned long base
, struct nlattr
**tb
,
120 struct nlattr
*est
, bool ovr
)
125 err
= tcf_exts_init(&e
, TCA_MATCHALL_ACT
, 0);
128 err
= tcf_exts_validate(net
, tp
, tb
, est
, &e
, ovr
);
132 if (tb
[TCA_MATCHALL_CLASSID
]) {
133 head
->res
.classid
= nla_get_u32(tb
[TCA_MATCHALL_CLASSID
]);
134 tcf_bind_filter(tp
, &head
->res
, base
);
137 tcf_exts_change(tp
, &head
->exts
, &e
);
141 tcf_exts_destroy(&e
);
145 static int mall_change(struct net
*net
, struct sk_buff
*in_skb
,
146 struct tcf_proto
*tp
, unsigned long base
,
147 u32 handle
, struct nlattr
**tca
,
148 unsigned long *arg
, bool ovr
)
150 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
151 struct net_device
*dev
= tp
->q
->dev_queue
->dev
;
152 struct nlattr
*tb
[TCA_MATCHALL_MAX
+ 1];
153 struct cls_mall_head
*new;
157 if (!tca
[TCA_OPTIONS
])
163 err
= nla_parse_nested(tb
, TCA_MATCHALL_MAX
, tca
[TCA_OPTIONS
],
168 if (tb
[TCA_MATCHALL_FLAGS
]) {
169 flags
= nla_get_u32(tb
[TCA_MATCHALL_FLAGS
]);
170 if (!tc_flags_valid(flags
))
174 new = kzalloc(sizeof(*new), GFP_KERNEL
);
178 err
= tcf_exts_init(&new->exts
, TCA_MATCHALL_ACT
, 0);
184 new->handle
= handle
;
187 err
= mall_set_parms(net
, tp
, new, base
, tb
, tca
[TCA_RATE
], ovr
);
191 if (tc_should_offload(dev
, tp
, flags
)) {
192 err
= mall_replace_hw_filter(tp
, new, (unsigned long) new);
194 if (tc_skip_sw(flags
))
195 goto err_replace_hw_filter
;
201 if (!tc_in_hw(new->flags
))
202 new->flags
|= TCA_CLS_FLAGS_NOT_IN_HW
;
204 *arg
= (unsigned long) head
;
205 rcu_assign_pointer(tp
->root
, new);
206 call_rcu(&head
->rcu
, mall_destroy_rcu
);
209 err_replace_hw_filter
:
211 tcf_exts_destroy(&new->exts
);
217 static int mall_delete(struct tcf_proto
*tp
, unsigned long arg
, bool *last
)
222 static void mall_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
)
224 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
226 if (arg
->count
< arg
->skip
)
228 if (arg
->fn(tp
, (unsigned long) head
, arg
) < 0)
234 static int mall_dump(struct net
*net
, struct tcf_proto
*tp
, unsigned long fh
,
235 struct sk_buff
*skb
, struct tcmsg
*t
)
237 struct cls_mall_head
*head
= (struct cls_mall_head
*) fh
;
243 t
->tcm_handle
= head
->handle
;
245 nest
= nla_nest_start(skb
, TCA_OPTIONS
);
247 goto nla_put_failure
;
249 if (head
->res
.classid
&&
250 nla_put_u32(skb
, TCA_MATCHALL_CLASSID
, head
->res
.classid
))
251 goto nla_put_failure
;
253 if (head
->flags
&& nla_put_u32(skb
, TCA_MATCHALL_FLAGS
, head
->flags
))
254 goto nla_put_failure
;
256 if (tcf_exts_dump(skb
, &head
->exts
))
257 goto nla_put_failure
;
259 nla_nest_end(skb
, nest
);
261 if (tcf_exts_dump_stats(skb
, &head
->exts
) < 0)
262 goto nla_put_failure
;
267 nla_nest_cancel(skb
, nest
);
271 static struct tcf_proto_ops cls_mall_ops __read_mostly
= {
273 .classify
= mall_classify
,
275 .destroy
= mall_destroy
,
277 .change
= mall_change
,
278 .delete = mall_delete
,
281 .owner
= THIS_MODULE
,
284 static int __init
cls_mall_init(void)
286 return register_tcf_proto_ops(&cls_mall_ops
);
289 static void __exit
cls_mall_exit(void)
291 unregister_tcf_proto_ops(&cls_mall_ops
);
294 module_init(cls_mall_init
);
295 module_exit(cls_mall_exit
);
297 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
298 MODULE_DESCRIPTION("Match-all classifier");
299 MODULE_LICENSE("GPL v2");