/*
 * net/sched/act_police.c	Input police filter.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>
#include <net/act_api.h>

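/*
 * L2T() maps a packet length L to its transmission time at the policer's
 * committed rate by indexing the rate table; L2T_P() does the same lookup
 * against the peak rate table.
 */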
#define L2T(p,L)   ((p)->tcfp_R_tab->data[(L)>>(p)->tcfp_R_tab->rate.cell_log])
#define L2T_P(p,L) ((p)->tcfp_P_tab->data[(L)>>(p)->tcfp_P_tab->rate.cell_log])

#define POL_TAB_MASK	15
static struct tcf_common *tcf_police_ht[POL_TAB_MASK + 1];
static u32 police_idx_gen;
static DEFINE_RWLOCK(police_lock);

static struct tcf_hashinfo police_hash_info = {
	.htab	= tcf_police_ht,
	.hmask	= POL_TAB_MASK,
	.lock	= &police_lock,
};

/* old policer structure from before tc actions */
struct tc_police_compat
{
	u32			index;
	int			action;
	u32			limit;
	u32			burst;
	u32			mtu;
	struct tc_ratespec	rate;
	struct tc_ratespec	peakrate;
};

/* Each policer is serialized by its individual spinlock */

#ifdef CONFIG_NET_CLS_ACT
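/*
 * Walk every policer in the hash table and dump each one as a nested
 * attribute into the netlink reply, resuming from cb->args[0] so large
 * dumps can be split across messages.
 */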
static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
				 int type, struct tc_action *a)
{
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock(&police_lock);

	s_i = cb->args[0];

	for (i = 0; i < (POL_TAB_MASK + 1); i++) {
		p = tcf_police_ht[tcf_hash(i, POL_TAB_MASK)];

		for (; p; p = p->tcfc_next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = index;
			r = (struct rtattr *)skb->tail;
			RTA_PUT(skb, a->order, 0, NULL);
			if (type == RTM_DELACTION)
				err = tcf_action_dump_1(skb, a, 0, 1);
			else
				err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				skb_trim(skb, (u8 *)r - skb->data);
				goto done;
			}
			r->rta_len = skb->tail - (u8 *)r;
			n_i++;
		}
	}
done:
	read_unlock(&police_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

rtattr_failure:
	skb_trim(skb, (u8 *)r - skb->data);
	goto done;
}
#endif

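/*
 * Unlink a policer from the hash table and free it, releasing its rate
 * tables and killing its rate estimator.
 */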
void tcf_police_destroy(struct tcf_police *p)
{
	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
	struct tcf_common **p1p;

	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == &p->common) {
			write_lock_bh(&police_lock);
			*p1p = p->tcf_next;
			write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
			gen_kill_estimator(&p->tcf_bstats,
					   &p->tcf_rate_est);
#endif
			if (p->tcfp_R_tab)
				qdisc_put_rtab(p->tcfp_R_tab);
			if (p->tcfp_P_tab)
				qdisc_put_rtab(p->tcfp_P_tab);
			kfree(p);
			return;
		}
	}
	BUG_TRAP(0);
}

#ifdef CONFIG_NET_CLS_ACT
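/*
 * Create a new policer, or bind to / overwrite an existing one when the
 * netlink request carries a known index.  Returns ACT_P_CREATED for a
 * freshly allocated instance, 0 when an existing policer was reused, or
 * a negative errno on failure.
 */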
static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
				 struct tc_action *a, int ovr, int bind)
{
	unsigned h;
	int ret = 0, err;
	struct rtattr *tb[TCA_POLICE_MAX];
	struct tc_police *parm;
	struct tcf_police *police;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
	int size;

	if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
		return -EINVAL;

	if (tb[TCA_POLICE_TBF-1] == NULL)
		return -EINVAL;
	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return -EINVAL;
	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);

	if (tb[TCA_POLICE_RESULT-1] != NULL &&
	    RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
		return -EINVAL;

	if (parm->index) {
		struct tcf_common *pc;

		pc = tcf_hash_lookup(parm->index, &police_hash_info);
		if (pc != NULL) {
			a->priv = pc;
			police = to_police(pc);
			if (bind) {
				police->tcf_bindcnt += 1;
				police->tcf_refcnt += 1;
			}
			if (ovr)
				goto override;
			return ret;
		}
	}

	police = kzalloc(sizeof(*police), GFP_KERNEL);
	if (police == NULL)
		return -ENOMEM;
	ret = ACT_P_CREATED;
	police->tcf_refcnt = 1;
	spin_lock_init(&police->tcf_lock);
	police->tcf_stats_lock = &police->tcf_lock;
	if (bind)
		police->tcf_bindcnt = 1;
override:
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
		if (R_tab == NULL)
			goto failure;
		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE-1]);
			if (P_tab == NULL) {
				qdisc_put_rtab(R_tab);
				goto failure;
			}
		}
	}
	/* No failure allowed after this point */
	spin_lock_bh(&police->tcf_lock);
	if (R_tab != NULL) {
		qdisc_put_rtab(police->tcfp_R_tab);
		police->tcfp_R_tab = R_tab;
	}
	if (P_tab != NULL) {
		qdisc_put_rtab(police->tcfp_P_tab);
		police->tcfp_P_tab = P_tab;
	}

	if (tb[TCA_POLICE_RESULT-1])
		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
	police->tcfp_toks = police->tcfp_burst = parm->burst;
	police->tcfp_mtu = parm->mtu;
	if (police->tcfp_mtu == 0) {
		police->tcfp_mtu = ~0;
		if (police->tcfp_R_tab)
			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
	}
	if (police->tcfp_P_tab)
		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
	police->tcf_action = parm->action;

#ifdef CONFIG_NET_ESTIMATOR
	if (tb[TCA_POLICE_AVRATE-1])
		police->tcfp_ewma_rate =
			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
	if (est)
		gen_replace_estimator(&police->tcf_bstats,
				      &police->tcf_rate_est,
				      police->tcf_stats_lock, est);
#endif

	spin_unlock_bh(&police->tcf_lock);
	if (ret != ACT_P_CREATED)
		return ret;

	PSCHED_GET_TIME(police->tcfp_t_c);
	police->tcf_index = parm->index ? parm->index :
		tcf_hash_new_index(&police_idx_gen, &police_hash_info);
	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
	write_lock_bh(&police_lock);
	police->tcf_next = tcf_police_ht[h];
	tcf_police_ht[h] = &police->common;
	write_unlock_bh(&police_lock);

	a->priv = police;
	return ret;

failure:
	if (ret == ACT_P_CREATED)
		kfree(police);
	return err;
}

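/* Release the action's reference on its policer (a bound one when @bind is set). */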
static int tcf_act_police_cleanup(struct tc_action *a, int bind)
{
	struct tcf_police *p = a->priv;

	if (p != NULL)
		return tcf_police_release(p, bind);
	return 0;
}

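/*
 * Per-packet policing decision.  The policer runs a token bucket (a dual
 * bucket when a peak rate is configured): tcfp_toks holds committed-rate
 * credit and tcfp_ptoks peak-rate credit, both in scheduler ticks and
 * replenished from the time elapsed since tcfp_t_c.  A packet conforms,
 * and tcfp_result is returned, only if it fits within tcfp_mtu and both
 * buckets remain non-negative after charging its transmission time;
 * otherwise the configured overlimit action (tcf_action) is returned.
 */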
static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_police *police = a->priv;
	psched_time_t now;
	long toks;
	long ptoks = 0;

	spin_lock(&police->tcf_lock);

	police->tcf_bstats.bytes += skb->len;
	police->tcf_bstats.packets++;

#ifdef CONFIG_NET_ESTIMATOR
	if (police->tcfp_ewma_rate &&
	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
		police->tcf_qstats.overlimits++;
		spin_unlock(&police->tcf_lock);
		return police->tcf_action;
	}
#endif

	if (skb->len <= police->tcfp_mtu) {
		if (police->tcfp_R_tab == NULL) {
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}

		PSCHED_GET_TIME(now);

		toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
					 police->tcfp_burst);
		if (police->tcfp_P_tab) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
				ptoks = (long)L2T_P(police, police->tcfp_mtu);
			ptoks -= L2T_P(police, skb->len);
		}
		toks += police->tcfp_toks;
		if (toks > (long)police->tcfp_burst)
			toks = police->tcfp_burst;
		toks -= L2T(police, skb->len);
		if ((toks|ptoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}
	}

	police->tcf_qstats.overlimits++;
	spin_unlock(&police->tcf_lock);
	return police->tcf_action;
}

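/*
 * Serialize the policer's configuration (TBF parameters, optional result
 * and average-rate limit) back into a netlink message.
 */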
static int
tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb->tail;
	struct tcf_police *police = a->priv;
	struct tc_police opt;

	opt.index = police->tcf_index;
	opt.action = police->tcf_action;
	opt.mtu = police->tcfp_mtu;
	opt.burst = police->tcfp_burst;
	opt.refcnt = police->tcf_refcnt - ref;
	opt.bindcnt = police->tcf_bindcnt - bind;
	if (police->tcfp_R_tab)
		opt.rate = police->tcfp_R_tab->rate;
	else
		memset(&opt.rate, 0, sizeof(opt.rate));
	if (police->tcfp_P_tab)
		opt.peakrate = police->tcfp_P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
	if (police->tcfp_result)
		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
			&police->tcfp_result);
#ifdef CONFIG_NET_ESTIMATOR
	if (police->tcfp_ewma_rate)
		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
#endif
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");

static struct tc_action_ops act_police_ops = {
	.kind		=	"police",
	.hinfo		=	&police_hash_info,
	.type		=	TCA_ID_POLICE,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_act_police,
	.dump		=	tcf_act_police_dump,
	.cleanup	=	tcf_act_police_cleanup,
	.lookup		=	tcf_hash_search,
	.init		=	tcf_act_police_locate,
	.walk		=	tcf_act_police_walker
};

static int __init
police_init_module(void)
{
	return tcf_register_action(&act_police_ops);
}

static void __exit
police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);

#else /* CONFIG_NET_CLS_ACT */

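/*
 * Standalone policer interface for kernels built without the generic
 * action framework (CONFIG_NET_CLS_ACT).  It mirrors the action variants
 * above but manages the hash table and index allocation itself.
 */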
static struct tcf_common *tcf_police_lookup(u32 index)
{
	struct tcf_hashinfo *hinfo = &police_hash_info;
	struct tcf_common *p;

	read_lock(hinfo->lock);
	for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
	     p = p->tcfc_next) {
		if (p->tcfc_index == index)
			break;
	}
	read_unlock(hinfo->lock);

	return p;
}

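/* Allocate the next unused policer index, skipping 0 and indices already in use. */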
static u32 tcf_police_new_index(void)
{
	u32 *idx_gen = &police_idx_gen;
	u32 val = *idx_gen;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_police_lookup(val));

	return (*idx_gen = val);
}

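/*
 * Find an existing policer by index or create a new one from the netlink
 * attributes; the non-action counterpart of tcf_act_police_locate().
 */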
struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
{
	unsigned int h;
	struct tcf_police *police;
	struct rtattr *tb[TCA_POLICE_MAX];
	struct tc_police *parm;
	int size;

	if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
		return NULL;

	if (tb[TCA_POLICE_TBF-1] == NULL)
		return NULL;
	size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
	if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
		return NULL;

	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);

	if (parm->index) {
		struct tcf_common *pc;

		pc = tcf_police_lookup(parm->index);
		if (pc) {
			police = to_police(pc);
			police->tcf_refcnt++;
			return police;
		}
	}
	police = kzalloc(sizeof(*police), GFP_KERNEL);
	if (unlikely(!police))
		return NULL;

	police->tcf_refcnt = 1;
	spin_lock_init(&police->tcf_lock);
	police->tcf_stats_lock = &police->tcf_lock;
	if (parm->rate.rate) {
		police->tcfp_R_tab =
			qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
		if (police->tcfp_R_tab == NULL)
			goto failure;
		if (parm->peakrate.rate) {
			police->tcfp_P_tab =
				qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE-1]);
			if (police->tcfp_P_tab == NULL)
				goto failure;
		}
	}
	if (tb[TCA_POLICE_RESULT-1]) {
		if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
			goto failure;
		police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
	}
#ifdef CONFIG_NET_ESTIMATOR
	if (tb[TCA_POLICE_AVRATE-1]) {
		if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
			goto failure;
		police->tcfp_ewma_rate =
			*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
	}
#endif
	police->tcfp_toks = police->tcfp_burst = parm->burst;
	police->tcfp_mtu = parm->mtu;
	if (police->tcfp_mtu == 0) {
		police->tcfp_mtu = ~0;
		if (police->tcfp_R_tab)
			police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
	}
	if (police->tcfp_P_tab)
		police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
	PSCHED_GET_TIME(police->tcfp_t_c);
	police->tcf_index = parm->index ? parm->index :
		tcf_police_new_index();
	police->tcf_action = parm->action;
#ifdef CONFIG_NET_ESTIMATOR
	if (est)
		gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
				  police->tcf_stats_lock, est);
#endif
	h = tcf_hash(police->tcf_index, POL_TAB_MASK);
	write_lock_bh(&police_lock);
	police->tcf_next = tcf_police_ht[h];
	tcf_police_ht[h] = &police->common;
	write_unlock_bh(&police_lock);
	return police;

failure:
	if (police->tcfp_P_tab)
		qdisc_put_rtab(police->tcfp_P_tab);
	if (police->tcfp_R_tab)
		qdisc_put_rtab(police->tcfp_R_tab);
	kfree(police);
	return NULL;
}

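/*
 * Standalone per-packet policing entry point; the token bucket logic is
 * identical to tcf_act_police() above.
 */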
int tcf_police(struct sk_buff *skb, struct tcf_police *police)
{
	psched_time_t now;
	long toks;
	long ptoks = 0;

	spin_lock(&police->tcf_lock);

	police->tcf_bstats.bytes += skb->len;
	police->tcf_bstats.packets++;

#ifdef CONFIG_NET_ESTIMATOR
	if (police->tcfp_ewma_rate &&
	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
		police->tcf_qstats.overlimits++;
		spin_unlock(&police->tcf_lock);
		return police->tcf_action;
	}
#endif
	if (skb->len <= police->tcfp_mtu) {
		if (police->tcfp_R_tab == NULL) {
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}

		PSCHED_GET_TIME(now);
		toks = PSCHED_TDIFF_SAFE(now, police->tcfp_t_c,
					 police->tcfp_burst);
		if (police->tcfp_P_tab) {
			ptoks = toks + police->tcfp_ptoks;
			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
				ptoks = (long)L2T_P(police, police->tcfp_mtu);
			ptoks -= L2T_P(police, skb->len);
		}
		toks += police->tcfp_toks;
		if (toks > (long)police->tcfp_burst)
			toks = police->tcfp_burst;
		toks -= L2T(police, skb->len);
		if ((toks|ptoks) >= 0) {
			police->tcfp_t_c = now;
			police->tcfp_toks = toks;
			police->tcfp_ptoks = ptoks;
			spin_unlock(&police->tcf_lock);
			return police->tcfp_result;
		}
	}

	police->tcf_qstats.overlimits++;
	spin_unlock(&police->tcf_lock);
	return police->tcf_action;
}
EXPORT_SYMBOL(tcf_police);

int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
{
	unsigned char *b = skb->tail;
	struct tc_police opt;

	opt.index = police->tcf_index;
	opt.action = police->tcf_action;
	opt.mtu = police->tcfp_mtu;
	opt.burst = police->tcfp_burst;
	if (police->tcfp_R_tab)
		opt.rate = police->tcfp_R_tab->rate;
	else
		memset(&opt.rate, 0, sizeof(opt.rate));
	if (police->tcfp_P_tab)
		opt.peakrate = police->tcfp_P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
	if (police->tcfp_result)
		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
			&police->tcfp_result);
#ifdef CONFIG_NET_ESTIMATOR
	if (police->tcfp_ewma_rate)
		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
#endif
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

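/*
 * Dump byte/packet counters, the rate estimate and queue statistics for
 * the policer via the gnet_stats interface.
 */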
int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
{
	struct gnet_dump d;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, police->tcf_stats_lock,
					 &d) < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
	    gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
#endif
	    gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

#endif /* CONFIG_NET_CLS_ACT */