/*
 * net/sched/act_police.c	Input police filter
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

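/* Policer configuration; the datapath reads it via RCU, so parameter
 * updates in tcf_police_init() do not have to take the per-action lock
 * on the fast path.
 */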
struct tcf_police_params {
	int			tcfp_result;
	u32			tcfp_ewma_rate;
	s64			tcfp_burst;
	u32			tcfp_mtu;
	s64			tcfp_mtu_ptoks;
	struct psched_ratecfg	rate;
	bool			rate_present;
	struct psched_ratecfg	peak;
	bool			peak_present;
	struct rcu_head		rcu;
};

struct tcf_police {
	struct tc_action	common;
	struct tcf_police_params __rcu *params;

	spinlock_t		tcfp_lock ____cacheline_aligned_in_smp;
	s64			tcfp_toks;
	s64			tcfp_ptoks;
	s64			tcfp_t_c;
};

#define to_police(pc) ((struct tcf_police *)pc)

/* old policer structure from before tc actions */
struct tc_police_compat {
	u32			index;
	int			action;
	u32			limit;
	u32			burst;
	u32			mtu;
	struct tc_ratespec	rate;
	struct tc_ratespec	peakrate;
};

/* Each policer is serialized by its individual spinlock */

static unsigned int police_net_id;
static struct tc_action_ops act_police_ops;

static int tcf_police_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, police_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
	[TCA_POLICE_RATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_PEAKRATE]	= { .len = TC_RTAB_SIZE },
	[TCA_POLICE_AVRATE]	= { .type = NLA_U32 },
	[TCA_POLICE_RESULT]	= { .type = NLA_U32 },
};

static int tcf_police_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   struct netlink_ext_ack *extack)
{
	int ret = 0, tcfp_result = TC_ACT_OK, err, size;
	struct nlattr *tb[TCA_POLICE_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	struct tc_action_net *tn = net_generic(net, police_net_id);
	struct tcf_police_params *new;
	bool exists = false;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_POLICE_MAX, nla, police_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_POLICE_TBF] == NULL)
		return -EINVAL;
	size = nla_len(tb[TCA_POLICE_TBF]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;

	parm = nla_data(tb[TCA_POLICE_TBF]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, NULL, a,
				     &act_police_ops, bind, true);
		if (ret) {
			tcf_idr_cleanup(tn, parm->index);
			return ret;
		}
		ret = ACT_P_CREATED;
		spin_lock_init(&(to_police(*a)->tcfp_lock));
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	police = to_police(*a);
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE], NULL);
		if (R_tab == NULL)
			goto failure;

		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE], NULL);
			if (P_tab == NULL)
				goto failure;
		}
	}

	if (est) {
		err = gen_replace_estimator(&police->tcf_bstats,
					    police->common.cpu_bstats,
					    &police->tcf_rate_est,
					    &police->tcf_lock,
					    NULL, est);
		if (err)
			goto failure;
	} else if (tb[TCA_POLICE_AVRATE] &&
		   (ret == ACT_P_CREATED ||
		    !gen_estimator_active(&police->tcf_rate_est))) {
		err = -EINVAL;
		goto failure;
	}

	if (tb[TCA_POLICE_RESULT]) {
		tcfp_result = nla_get_u32(tb[TCA_POLICE_RESULT]);
		if (TC_ACT_EXT_CMP(tcfp_result, TC_ACT_GOTO_CHAIN)) {
			NL_SET_ERR_MSG(extack,
				       "goto chain not allowed on fallback");
			err = -EINVAL;
			goto failure;
		}
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (unlikely(!new)) {
		err = -ENOMEM;
		goto failure;
	}

	/* No failure allowed after this point */
	new->tcfp_result = tcfp_result;
	new->tcfp_mtu = parm->mtu;
	if (!new->tcfp_mtu) {
		new->tcfp_mtu = ~0;
		if (R_tab)
			new->tcfp_mtu = 255 << R_tab->rate.cell_log;
	}
	if (R_tab) {
		new->rate_present = true;
		psched_ratecfg_precompute(&new->rate, &R_tab->rate, 0);
		qdisc_put_rtab(R_tab);
	} else {
		new->rate_present = false;
	}
	if (P_tab) {
		new->peak_present = true;
		psched_ratecfg_precompute(&new->peak, &P_tab->rate, 0);
		qdisc_put_rtab(P_tab);
	} else {
		new->peak_present = false;
	}

	new->tcfp_burst = PSCHED_TICKS2NS(parm->burst);
	if (new->peak_present)
		new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak,
							 new->tcfp_mtu);

	if (tb[TCA_POLICE_AVRATE])
		new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]);

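	/* Publish the new configuration: reset the token bucket state under
	 * tcfp_lock, then swap the params pointer under tcf_lock so that
	 * datapath readers see a consistent set of parameters via RCU.
	 */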
	spin_lock_bh(&police->tcf_lock);
	spin_lock_bh(&police->tcfp_lock);
	police->tcfp_t_c = ktime_get_ns();
	police->tcfp_toks = new->tcfp_burst;
	if (new->peak_present)
		police->tcfp_ptoks = new->tcfp_mtu_ptoks;
	spin_unlock_bh(&police->tcfp_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	rcu_swap_protected(police->params,
			   new,
			   lockdep_is_held(&police->tcf_lock));
	spin_unlock_bh(&police->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (new)
		kfree_rcu(new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);
	return ret;

failure:
	qdisc_put_rtab(P_tab);
	qdisc_put_rtab(R_tab);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;
	s64 now, toks, ptoks = 0;
	int ret;

	tcf_lastuse_update(&police->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);

	ret = READ_ONCE(police->tcf_action);
	p = rcu_dereference_bh(police->params);

	if (p->tcfp_ewma_rate) {
		struct gnet_stats_rate_est64 sample;

		if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
		    sample.bps >= p->tcfp_ewma_rate)
			goto inc_overlimits;
	}

	if (qdisc_pkt_len(skb) <= p->tcfp_mtu) {
		if (!p->rate_present) {
			ret = p->tcfp_result;
			goto end;
		}

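		/* Dual token bucket in nanosecond units: tokens accrue with
		 * the time elapsed since tcfp_t_c, capped at the configured
		 * burst (and at one MTU worth of peak tokens), and each
		 * packet spends its transmission time at the configured
		 * rate (and peak rate, if present).
		 */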
		now = ktime_get_ns();
		spin_lock_bh(&police->tcfp_lock);
		toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst);
		if (p->peak_present) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > p->tcfp_mtu_ptoks)
				ptoks = p->tcfp_mtu_ptoks;
			ptoks -= (s64)psched_l2t_ns(&p->peak,
						    qdisc_pkt_len(skb));
		}
		toks += police->tcfp_toks;
		if (toks > p->tcfp_burst)
			toks = p->tcfp_burst;
		toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb));
		if ((toks|ptoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			spin_unlock_bh(&police->tcfp_lock);
			ret = p->tcfp_result;
			goto inc_drops;
		}
		spin_unlock_bh(&police->tcfp_lock);
	}

inc_overlimits:
	qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
inc_drops:
	if (ret == TC_ACT_SHOT)
		qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
end:
	return ret;
}

static void tcf_police_cleanup(struct tc_action *a)
{
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;

	p = rcu_dereference_protected(police->params, 1);
	if (p)
		kfree_rcu(p, rcu);
}

static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,
			   int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_police *police = to_police(a);
	struct tcf_police_params *p;
	struct tc_police opt = {
		.index = police->tcf_index,
		.refcnt = refcount_read(&police->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&police->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&police->tcf_lock);
	opt.action = police->tcf_action;
	p = rcu_dereference_protected(police->params,
				      lockdep_is_held(&police->tcf_lock));
	opt.mtu = p->tcfp_mtu;
	opt.burst = PSCHED_NS2TICKS(p->tcfp_burst);
	if (p->rate_present)
		psched_ratecfg_getrate(&opt.rate, &p->rate);
	if (p->peak_present)
		psched_ratecfg_getrate(&opt.peakrate, &p->peak);
	if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
		goto nla_put_failure;
	if (p->tcfp_result &&
	    nla_put_u32(skb, TCA_POLICE_RESULT, p->tcfp_result))
		goto nla_put_failure;
	if (p->tcfp_ewma_rate &&
	    nla_put_u32(skb, TCA_POLICE_AVRATE, p->tcfp_ewma_rate))
		goto nla_put_failure;

	t.install = jiffies_to_clock_t(jiffies - police->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - police->tcf_tm.lastuse);
	t.firstuse = jiffies_to_clock_t(jiffies - police->tcf_tm.firstuse);
	t.expires = jiffies_to_clock_t(police->tcf_tm.expires);
	if (nla_put_64bit(skb, TCA_POLICE_TM, sizeof(t), &t, TCA_POLICE_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&police->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&police->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_police_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, police_net_id);

	return tcf_idr_search(tn, a, index);
}

MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");

static struct tc_action_ops act_police_ops = {
	.kind		= "police",
	.id		= TCA_ID_POLICE,
	.owner		= THIS_MODULE,
	.act		= tcf_police_act,
	.dump		= tcf_police_dump,
	.init		= tcf_police_init,
	.walk		= tcf_police_walker,
	.lookup		= tcf_police_search,
	.cleanup	= tcf_police_cleanup,
	.size		= sizeof(struct tcf_police),
};

static __net_init int police_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, police_net_id);

	return tc_action_net_init(tn, &act_police_ops);
}

static void __net_exit police_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, police_net_id);
}

static struct pernet_operations police_net_ops = {
	.init = police_init_net,
	.exit_batch = police_exit_net,
	.id   = &police_net_id,
	.size = sizeof(struct tc_action_net),
};

static int __init police_init_module(void)
{
	return tcf_register_action(&act_police_ops, &police_net_ops);
}

static void __exit police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops, &police_net_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);