/*
 * net/sched/cls_matchall.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/module.h>
16 #include <net/sch_generic.h>
17 #include <net/pkt_cls.h>
19 struct cls_mall_head
{
21 struct tcf_result res
;
27 static int mall_classify(struct sk_buff
*skb
, const struct tcf_proto
*tp
,
28 struct tcf_result
*res
)
30 struct cls_mall_head
*head
= rcu_dereference_bh(tp
->root
);
32 if (tc_skip_sw(head
->flags
))
36 return tcf_exts_exec(skb
, &head
->exts
, res
);
/* Classifier init: nothing to allocate up front; the head is created
 * lazily in mall_change().
 */
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}
44 static void mall_destroy_rcu(struct rcu_head
*rcu
)
46 struct cls_mall_head
*head
= container_of(rcu
, struct cls_mall_head
,
49 tcf_exts_destroy(&head
->exts
);
53 static int mall_replace_hw_filter(struct tcf_proto
*tp
,
54 struct cls_mall_head
*head
,
57 struct net_device
*dev
= tp
->q
->dev_queue
->dev
;
58 struct tc_to_netdev offload
;
59 struct tc_cls_matchall_offload mall_offload
= {0};
62 offload
.type
= TC_SETUP_MATCHALL
;
63 offload
.cls_mall
= &mall_offload
;
64 offload
.cls_mall
->command
= TC_CLSMATCHALL_REPLACE
;
65 offload
.cls_mall
->exts
= &head
->exts
;
66 offload
.cls_mall
->cookie
= cookie
;
68 err
= dev
->netdev_ops
->ndo_setup_tc(dev
, tp
->q
->handle
,
70 tp
->protocol
, &offload
);
72 head
->flags
|= TCA_CLS_FLAGS_IN_HW
;
77 static void mall_destroy_hw_filter(struct tcf_proto
*tp
,
78 struct cls_mall_head
*head
,
81 struct net_device
*dev
= tp
->q
->dev_queue
->dev
;
82 struct tc_to_netdev offload
;
83 struct tc_cls_matchall_offload mall_offload
= {0};
85 offload
.type
= TC_SETUP_MATCHALL
;
86 offload
.cls_mall
= &mall_offload
;
87 offload
.cls_mall
->command
= TC_CLSMATCHALL_DESTROY
;
88 offload
.cls_mall
->exts
= NULL
;
89 offload
.cls_mall
->cookie
= cookie
;
91 dev
->netdev_ops
->ndo_setup_tc(dev
, tp
->q
->handle
, tp
->chain
->index
,
92 tp
->protocol
, &offload
);
95 static void mall_destroy(struct tcf_proto
*tp
)
97 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
98 struct net_device
*dev
= tp
->q
->dev_queue
->dev
;
103 if (tc_should_offload(dev
, tp
, head
->flags
))
104 mall_destroy_hw_filter(tp
, head
, (unsigned long) head
);
106 call_rcu(&head
->rcu
, mall_destroy_rcu
);
109 static unsigned long mall_get(struct tcf_proto
*tp
, u32 handle
)
114 static const struct nla_policy mall_policy
[TCA_MATCHALL_MAX
+ 1] = {
115 [TCA_MATCHALL_UNSPEC
] = { .type
= NLA_UNSPEC
},
116 [TCA_MATCHALL_CLASSID
] = { .type
= NLA_U32
},
119 static int mall_set_parms(struct net
*net
, struct tcf_proto
*tp
,
120 struct cls_mall_head
*head
,
121 unsigned long base
, struct nlattr
**tb
,
122 struct nlattr
*est
, bool ovr
)
127 err
= tcf_exts_init(&e
, TCA_MATCHALL_ACT
, 0);
130 err
= tcf_exts_validate(net
, tp
, tb
, est
, &e
, ovr
);
134 if (tb
[TCA_MATCHALL_CLASSID
]) {
135 head
->res
.classid
= nla_get_u32(tb
[TCA_MATCHALL_CLASSID
]);
136 tcf_bind_filter(tp
, &head
->res
, base
);
139 tcf_exts_change(tp
, &head
->exts
, &e
);
143 tcf_exts_destroy(&e
);
147 static int mall_change(struct net
*net
, struct sk_buff
*in_skb
,
148 struct tcf_proto
*tp
, unsigned long base
,
149 u32 handle
, struct nlattr
**tca
,
150 unsigned long *arg
, bool ovr
)
152 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
153 struct net_device
*dev
= tp
->q
->dev_queue
->dev
;
154 struct nlattr
*tb
[TCA_MATCHALL_MAX
+ 1];
155 struct cls_mall_head
*new;
159 if (!tca
[TCA_OPTIONS
])
165 err
= nla_parse_nested(tb
, TCA_MATCHALL_MAX
, tca
[TCA_OPTIONS
],
170 if (tb
[TCA_MATCHALL_FLAGS
]) {
171 flags
= nla_get_u32(tb
[TCA_MATCHALL_FLAGS
]);
172 if (!tc_flags_valid(flags
))
176 new = kzalloc(sizeof(*new), GFP_KERNEL
);
180 err
= tcf_exts_init(&new->exts
, TCA_MATCHALL_ACT
, 0);
186 new->handle
= handle
;
189 err
= mall_set_parms(net
, tp
, new, base
, tb
, tca
[TCA_RATE
], ovr
);
193 if (tc_should_offload(dev
, tp
, flags
)) {
194 err
= mall_replace_hw_filter(tp
, new, (unsigned long) new);
196 if (tc_skip_sw(flags
))
197 goto err_replace_hw_filter
;
203 if (!tc_in_hw(new->flags
))
204 new->flags
|= TCA_CLS_FLAGS_NOT_IN_HW
;
206 *arg
= (unsigned long) head
;
207 rcu_assign_pointer(tp
->root
, new);
210 err_replace_hw_filter
:
212 tcf_exts_destroy(&new->exts
);
218 static int mall_delete(struct tcf_proto
*tp
, unsigned long arg
, bool *last
)
223 static void mall_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
)
225 struct cls_mall_head
*head
= rtnl_dereference(tp
->root
);
227 if (arg
->count
< arg
->skip
)
229 if (arg
->fn(tp
, (unsigned long) head
, arg
) < 0)
235 static int mall_dump(struct net
*net
, struct tcf_proto
*tp
, unsigned long fh
,
236 struct sk_buff
*skb
, struct tcmsg
*t
)
238 struct cls_mall_head
*head
= (struct cls_mall_head
*) fh
;
244 t
->tcm_handle
= head
->handle
;
246 nest
= nla_nest_start(skb
, TCA_OPTIONS
);
248 goto nla_put_failure
;
250 if (head
->res
.classid
&&
251 nla_put_u32(skb
, TCA_MATCHALL_CLASSID
, head
->res
.classid
))
252 goto nla_put_failure
;
254 if (head
->flags
&& nla_put_u32(skb
, TCA_MATCHALL_FLAGS
, head
->flags
))
255 goto nla_put_failure
;
257 if (tcf_exts_dump(skb
, &head
->exts
))
258 goto nla_put_failure
;
260 nla_nest_end(skb
, nest
);
262 if (tcf_exts_dump_stats(skb
, &head
->exts
) < 0)
263 goto nla_put_failure
;
268 nla_nest_cancel(skb
, nest
);
272 static struct tcf_proto_ops cls_mall_ops __read_mostly
= {
274 .classify
= mall_classify
,
276 .destroy
= mall_destroy
,
278 .change
= mall_change
,
279 .delete = mall_delete
,
282 .owner
= THIS_MODULE
,
285 static int __init
cls_mall_init(void)
287 return register_tcf_proto_ops(&cls_mall_ops
);
290 static void __exit
cls_mall_exit(void)
292 unregister_tcf_proto_ops(&cls_mall_ops
);
295 module_init(cls_mall_init
);
296 module_exit(cls_mall_exit
);
298 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
299 MODULE_DESCRIPTION("Match-all classifier");
300 MODULE_LICENSE("GPL v2");