net/sched/act_ife.c (mirror_ubuntu-focal-kernel.git, Ubuntu-5.4.0-117.132)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * net/sched/act_ife.c Inter-FE action based on ForCES WG InterFE LFB
4 *
5 * Refer to:
6 * draft-ietf-forces-interfelfb-03
7 * and
8 * netdev01 paper:
9 * "Distributing Linux Traffic Control Classifier-Action
10 * Subsystem"
11 * Authors: Jamal Hadi Salim and Damascene M. Joachimpillai
12 *
13 * copyright Jamal Hadi Salim (2015)
14 */
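/* Example usage from user space (a sketch, not verified against every
 * iproute2 version; see tc-ife(8) for the exact syntax):
 *
 *   # encode the skb mark as IFE metadata on egress of eth0
 *   tc qdisc add dev eth0 handle 1: root prio
 *   tc filter add dev eth0 parent 1: protocol ip u32 match u32 0 0 \
 *      action ife encode allow mark dst 02:15:15:15:15:15
 *
 *   # decode IFE frames (ethertype 0xed3e, ETH_P_IFE) on ingress of eth1
 *   tc qdisc add dev eth1 handle ffff: ingress
 *   tc filter add dev eth1 parent ffff: protocol 0xed3e u32 match u32 0 0 \
 *      action ife decode reclassify
 */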
15
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #include <linux/errno.h>
20 #include <linux/skbuff.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/module.h>
23 #include <linux/init.h>
24 #include <net/net_namespace.h>
25 #include <net/netlink.h>
26 #include <net/pkt_sched.h>
27 #include <net/pkt_cls.h>
28 #include <uapi/linux/tc_act/tc_ife.h>
29 #include <net/tc_act/tc_ife.h>
30 #include <linux/etherdevice.h>
31 #include <net/ife.h>
32
33 static unsigned int ife_net_id;
34 static int max_metacnt = IFE_META_MAX + 1;
35 static struct tc_action_ops act_ife_ops;
36
37 static const struct nla_policy ife_policy[TCA_IFE_MAX + 1] = {
38 [TCA_IFE_PARMS] = { .len = sizeof(struct tc_ife)},
39 [TCA_IFE_DMAC] = { .len = ETH_ALEN},
40 [TCA_IFE_SMAC] = { .len = ETH_ALEN},
41 [TCA_IFE_TYPE] = { .type = NLA_U16},
42 };
43
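/* The generic u16/u32 metadata helpers below are exported for use by the
 * individual IFE metadata modules (e.g. act_meta_mark, act_meta_skbprio,
 * act_meta_skbtcindex).
 */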
44 int ife_encode_meta_u16(u16 metaval, void *skbdata, struct tcf_meta_info *mi)
45 {
46 u16 edata = 0;
47
48 if (mi->metaval)
49 edata = *(u16 *)mi->metaval;
50 else if (metaval)
51 edata = metaval;
52
53 if (!edata) /* will not encode */
54 return 0;
55
56 edata = htons(edata);
57 return ife_tlv_meta_encode(skbdata, mi->metaid, 2, &edata);
58 }
59 EXPORT_SYMBOL_GPL(ife_encode_meta_u16);
60
61 int ife_get_meta_u32(struct sk_buff *skb, struct tcf_meta_info *mi)
62 {
63 if (mi->metaval)
64 return nla_put_u32(skb, mi->metaid, *(u32 *)mi->metaval);
65 else
66 return nla_put(skb, mi->metaid, 0, NULL);
67 }
68 EXPORT_SYMBOL_GPL(ife_get_meta_u32);
69
70 int ife_check_meta_u32(u32 metaval, struct tcf_meta_info *mi)
71 {
72 if (metaval || mi->metaval)
73 return 8; /* T+L+V == 2+2+4 */
74
75 return 0;
76 }
77 EXPORT_SYMBOL_GPL(ife_check_meta_u32);
78
79 int ife_check_meta_u16(u16 metaval, struct tcf_meta_info *mi)
80 {
81 if (metaval || mi->metaval)
82 return 8; /* T+L+(V) == 2+2+(2+2bytepad) */
83
84 return 0;
85 }
86 EXPORT_SYMBOL_GPL(ife_check_meta_u16);
87
88 int ife_encode_meta_u32(u32 metaval, void *skbdata, struct tcf_meta_info *mi)
89 {
90 u32 edata = metaval;
91
92 if (mi->metaval)
93 edata = *(u32 *)mi->metaval;
94 else if (metaval)
95 edata = metaval;
96
97 if (!edata) /* will not encode */
98 return 0;
99
100 edata = htonl(edata);
101 return ife_tlv_meta_encode(skbdata, mi->metaid, 4, &edata);
102 }
103 EXPORT_SYMBOL_GPL(ife_encode_meta_u32);
104
105 int ife_get_meta_u16(struct sk_buff *skb, struct tcf_meta_info *mi)
106 {
107 if (mi->metaval)
108 return nla_put_u16(skb, mi->metaid, *(u16 *)mi->metaval);
109 else
110 return nla_put(skb, mi->metaid, 0, NULL);
111 }
112 EXPORT_SYMBOL_GPL(ife_get_meta_u16);
113
114 int ife_alloc_meta_u32(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
115 {
116 mi->metaval = kmemdup(metaval, sizeof(u32), gfp);
117 if (!mi->metaval)
118 return -ENOMEM;
119
120 return 0;
121 }
122 EXPORT_SYMBOL_GPL(ife_alloc_meta_u32);
123
124 int ife_alloc_meta_u16(struct tcf_meta_info *mi, void *metaval, gfp_t gfp)
125 {
126 mi->metaval = kmemdup(metaval, sizeof(u16), gfp);
127 if (!mi->metaval)
128 return -ENOMEM;
129
130 return 0;
131 }
132 EXPORT_SYMBOL_GPL(ife_alloc_meta_u16);
133
134 void ife_release_meta_gen(struct tcf_meta_info *mi)
135 {
136 kfree(mi->metaval);
137 }
138 EXPORT_SYMBOL_GPL(ife_release_meta_gen);
139
140 int ife_validate_meta_u32(void *val, int len)
141 {
142 if (len == sizeof(u32))
143 return 0;
144
145 return -EINVAL;
146 }
147 EXPORT_SYMBOL_GPL(ife_validate_meta_u32);
148
149 int ife_validate_meta_u16(void *val, int len)
150 {
151 /* length will not include padding */
152 if (len == sizeof(u16))
153 return 0;
154
155 return -EINVAL;
156 }
157 EXPORT_SYMBOL_GPL(ife_validate_meta_u16);
158
159 static LIST_HEAD(ifeoplist);
160 static DEFINE_RWLOCK(ife_mod_lock);
161
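/* Look up the meta ops registered for @metaid and take a reference on the
 * owning module; returns NULL if nothing is registered for the id or the
 * module is going away.
 */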
162 static struct tcf_meta_ops *find_ife_oplist(u16 metaid)
163 {
164 struct tcf_meta_ops *o;
165
166 read_lock(&ife_mod_lock);
167 list_for_each_entry(o, &ifeoplist, list) {
168 if (o->metaid == metaid) {
169 if (!try_module_get(o->owner))
170 o = NULL;
171 read_unlock(&ife_mod_lock);
172 return o;
173 }
174 }
175 read_unlock(&ife_mod_lock);
176
177 return NULL;
178 }
179
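/* Register a metadata type. All mandatory callbacks must be supplied; a
 * default release handler is installed when none is given.
 */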
180 int register_ife_op(struct tcf_meta_ops *mops)
181 {
182 struct tcf_meta_ops *m;
183
184 if (!mops->metaid || !mops->metatype || !mops->name ||
185 !mops->check_presence || !mops->encode || !mops->decode ||
186 !mops->get || !mops->alloc)
187 return -EINVAL;
188
189 write_lock(&ife_mod_lock);
190
191 list_for_each_entry(m, &ifeoplist, list) {
192 if (m->metaid == mops->metaid ||
193 (strcmp(mops->name, m->name) == 0)) {
194 write_unlock(&ife_mod_lock);
195 return -EEXIST;
196 }
197 }
198
199 if (!mops->release)
200 mops->release = ife_release_meta_gen;
201
202 list_add_tail(&mops->list, &ifeoplist);
203 write_unlock(&ife_mod_lock);
204 return 0;
205 }
206 EXPORT_SYMBOL_GPL(register_ife_op);
207
208 int unregister_ife_op(struct tcf_meta_ops *mops)
209 {
210 struct tcf_meta_ops *m;
211 int err = -ENOENT;
212
213 write_lock(&ife_mod_lock);
214 list_for_each_entry(m, &ifeoplist, list) {
215 if (m->metaid == mops->metaid) {
216 list_del(&mops->list);
217 err = 0;
218 break;
219 }
220 }
221 write_unlock(&ife_mod_lock);
222
223 return err;
224 }
225 EXPORT_SYMBOL_GPL(unregister_ife_op);
226
227 static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
228 {
229 int ret = 0;
230 /* XXX: unfortunately we can't use nla_policy at this point
231 * because a length of 0 is valid in the case of
232 * "allow". "use" semantics do enforce a proper
233 * length; nla_policy could have covered that, but it is
234 * awkward to use it just for that case.
235 */
236 if (ops->validate)
237 return ops->validate(val, len);
238
239 if (ops->metatype == NLA_U32)
240 ret = ife_validate_meta_u32(val, len);
241 else if (ops->metatype == NLA_U16)
242 ret = ife_validate_meta_u16(val, len);
243
244 return ret;
245 }
246
247 #ifdef CONFIG_MODULES
248 static const char *ife_meta_id2name(u32 metaid)
249 {
250 switch (metaid) {
251 case IFE_META_SKBMARK:
252 return "skbmark";
253 case IFE_META_PRIO:
254 return "skbprio";
255 case IFE_META_TCINDEX:
256 return "tcindex";
257 default:
258 return "unknown";
259 }
260 }
261 #endif
262
263 /* called when adding new meta information: loads the meta ops module
264 * for @metaid if needed and validates the supplied value */
265 static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
266 {
267 struct tcf_meta_ops *ops = find_ife_oplist(metaid);
268 int ret = 0;
269
270 if (!ops) {
271 ret = -ENOENT;
272 #ifdef CONFIG_MODULES
273 if (rtnl_held)
274 rtnl_unlock();
275 request_module("ife-meta-%s", ife_meta_id2name(metaid));
276 if (rtnl_held)
277 rtnl_lock();
278 ops = find_ife_oplist(metaid);
279 #endif
280 }
281
282 if (ops) {
283 ret = 0;
284 if (len)
285 ret = ife_validate_metatype(ops, val, len);
286
287 module_put(ops->owner);
288 }
289
290 return ret;
291 }
292
293 /* called when adding new meta information: allocates the metainfo
294 * entry and links it onto the action's metalist */
295 static int __add_metainfo(const struct tcf_meta_ops *ops,
296 struct tcf_ife_info *ife, u32 metaid, void *metaval,
297 int len, bool atomic, bool exists)
298 {
299 struct tcf_meta_info *mi = NULL;
300 int ret = 0;
301
302 mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
303 if (!mi)
304 return -ENOMEM;
305
306 mi->metaid = metaid;
307 mi->ops = ops;
308 if (len > 0) {
309 ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
310 if (ret != 0) {
311 kfree(mi);
312 return ret;
313 }
314 }
315
316 if (exists)
317 spin_lock_bh(&ife->tcf_lock);
318 list_add_tail(&mi->metalist, &ife->metalist);
319 if (exists)
320 spin_unlock_bh(&ife->tcf_lock);
321
322 return ret;
323 }
324
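/* Variant of add_metainfo() used while walking ifeoplist under
 * ife_mod_lock: takes the module reference directly and allocates with
 * GFP_ATOMIC.
 */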
325 static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
326 struct tcf_ife_info *ife, u32 metaid,
327 bool exists)
328 {
329 int ret;
330
331 if (!try_module_get(ops->owner))
332 return -ENOENT;
333 ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
334 if (ret)
335 module_put(ops->owner);
336 return ret;
337 }
338
339 static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
340 int len, bool exists)
341 {
342 const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
343 int ret;
344
345 if (!ops)
346 return -ENOENT;
347 ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
348 if (ret)
349 /* put back the module reference find_ife_oplist took */
350 module_put(ops->owner);
351 return ret;
352 }
353
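/* "allow all": install every currently registered metadata type on this
 * action. Succeeds if at least one type could be installed.
 */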
354 static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
355 {
356 struct tcf_meta_ops *o;
357 int rc = 0;
358 int installed = 0;
359
360 read_lock(&ife_mod_lock);
361 list_for_each_entry(o, &ifeoplist, list) {
362 rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
363 if (rc == 0)
364 installed += 1;
365 }
366 read_unlock(&ife_mod_lock);
367
368 if (installed)
369 return 0;
370 else
371 return -EINVAL;
372 }
373
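/* Dump the installed metadata as a TCA_IFE_METALST nest; trims the
 * message and returns -1 if nothing could be encoded.
 */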
374 static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
375 {
376 struct tcf_meta_info *e;
377 struct nlattr *nest;
378 unsigned char *b = skb_tail_pointer(skb);
379 int total_encoded = 0;
380
381 /* an empty metalist can only happen on decode */
382 if (list_empty(&ife->metalist))
383 return 0;
384
385 nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
386 if (!nest)
387 goto out_nlmsg_trim;
388
389 list_for_each_entry(e, &ife->metalist, metalist) {
390 if (!e->ops->get(skb, e))
391 total_encoded += 1;
392 }
393
394 if (!total_encoded)
395 goto out_nlmsg_trim;
396
397 nla_nest_end(skb, nest);
398
399 return 0;
400
401 out_nlmsg_trim:
402 nlmsg_trim(skb, b);
403 return -1;
404 }
405
406 /* under ife->tcf_lock */
407 static void _tcf_ife_cleanup(struct tc_action *a)
408 {
409 struct tcf_ife_info *ife = to_ife(a);
410 struct tcf_meta_info *e, *n;
411
412 list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
413 list_del(&e->metalist);
414 if (e->metaval) {
415 if (e->ops->release)
416 e->ops->release(e);
417 else
418 kfree(e->metaval);
419 }
420 module_put(e->ops->owner);
421 kfree(e);
422 }
423 }
424
425 static void tcf_ife_cleanup(struct tc_action *a)
426 {
427 struct tcf_ife_info *ife = to_ife(a);
428 struct tcf_ife_params *p;
429
430 spin_lock_bh(&ife->tcf_lock);
431 _tcf_ife_cleanup(a);
432 spin_unlock_bh(&ife->tcf_lock);
433
434 p = rcu_dereference_protected(ife->params, 1);
435 if (p)
436 kfree_rcu(p, rcu);
437 }
438
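/* Make sure the meta ops module for every metadatum attribute that was
 * passed is loaded, and validate each value's length.
 */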
439 static int load_metalist(struct nlattr **tb, bool rtnl_held)
440 {
441 int i;
442
443 for (i = 1; i < max_metacnt; i++) {
444 if (tb[i]) {
445 void *val = nla_data(tb[i]);
446 int len = nla_len(tb[i]);
447 int rc;
448
449 rc = load_metaops_and_vet(i, val, len, rtnl_held);
450 if (rc != 0)
451 return rc;
452 }
453 }
454
455 return 0;
456 }
457
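/* Add a metainfo entry on the action for each metadatum attribute that
 * was passed.
 */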
458 static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
459 bool exists, bool rtnl_held)
460 {
461 int len = 0;
462 int rc = 0;
463 int i = 0;
464 void *val;
465
466 for (i = 1; i < max_metacnt; i++) {
467 if (tb[i]) {
468 val = nla_data(tb[i]);
469 len = nla_len(tb[i]);
470
471 rc = add_metainfo(ife, i, val, len, exists);
472 if (rc)
473 return rc;
474 }
475 }
476
477 return rc;
478 }
479
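/* .init handler: parse the netlink attributes, create or update the
 * action instance and swap the new parameter block in under RCU.
 */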
480 static int tcf_ife_init(struct net *net, struct nlattr *nla,
481 struct nlattr *est, struct tc_action **a,
482 int ovr, int bind, bool rtnl_held,
483 struct tcf_proto *tp, struct netlink_ext_ack *extack)
484 {
485 struct tc_action_net *tn = net_generic(net, ife_net_id);
486 struct nlattr *tb[TCA_IFE_MAX + 1];
487 struct nlattr *tb2[IFE_META_MAX + 1];
488 struct tcf_chain *goto_ch = NULL;
489 struct tcf_ife_params *p;
490 struct tcf_ife_info *ife;
491 u16 ife_type = ETH_P_IFE;
492 struct tc_ife *parm;
493 u8 *daddr = NULL;
494 u8 *saddr = NULL;
495 bool exists = false;
496 int ret = 0;
497 u32 index;
498 int err;
499
500 if (!nla) {
501 NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
502 return -EINVAL;
503 }
504
505 err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
506 NULL);
507 if (err < 0)
508 return err;
509
510 if (!tb[TCA_IFE_PARMS])
511 return -EINVAL;
512
513 parm = nla_data(tb[TCA_IFE_PARMS]);
514
515 /* IFE_DECODE is 0 and indicates the opposite of IFE_ENCODE because
516 * they cannot run at the same time. Reject all other flag values,
517 * which are not supported right now.
518 */
519 if (parm->flags & ~IFE_ENCODE)
520 return -EINVAL;
521
522 p = kzalloc(sizeof(*p), GFP_KERNEL);
523 if (!p)
524 return -ENOMEM;
525
526 if (tb[TCA_IFE_METALST]) {
527 err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
528 tb[TCA_IFE_METALST], NULL,
529 NULL);
530 if (err) {
531 kfree(p);
532 return err;
533 }
534 err = load_metalist(tb2, rtnl_held);
535 if (err) {
536 kfree(p);
537 return err;
538 }
539 }
540
541 index = parm->index;
542 err = tcf_idr_check_alloc(tn, &index, a, bind);
543 if (err < 0) {
544 kfree(p);
545 return err;
546 }
547 exists = err;
548 if (exists && bind) {
549 kfree(p);
550 return 0;
551 }
552
553 if (!exists) {
554 ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
555 bind, true);
556 if (ret) {
557 tcf_idr_cleanup(tn, index);
558 kfree(p);
559 return ret;
560 }
561 ret = ACT_P_CREATED;
562 } else if (!ovr) {
563 tcf_idr_release(*a, bind);
564 kfree(p);
565 return -EEXIST;
566 }
567
568 ife = to_ife(*a);
569 if (ret == ACT_P_CREATED)
570 INIT_LIST_HEAD(&ife->metalist);
571
572 err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
573 if (err < 0)
574 goto release_idr;
575
576 p->flags = parm->flags;
577
578 if (parm->flags & IFE_ENCODE) {
579 if (tb[TCA_IFE_TYPE])
580 ife_type = nla_get_u16(tb[TCA_IFE_TYPE]);
581 if (tb[TCA_IFE_DMAC])
582 daddr = nla_data(tb[TCA_IFE_DMAC]);
583 if (tb[TCA_IFE_SMAC])
584 saddr = nla_data(tb[TCA_IFE_SMAC]);
585 }
586
587 if (parm->flags & IFE_ENCODE) {
588 if (daddr)
589 ether_addr_copy(p->eth_dst, daddr);
590 else
591 eth_zero_addr(p->eth_dst);
592
593 if (saddr)
594 ether_addr_copy(p->eth_src, saddr);
595 else
596 eth_zero_addr(p->eth_src);
597
598 p->eth_type = ife_type;
599 }
600
601 if (tb[TCA_IFE_METALST]) {
602 err = populate_metalist(ife, tb2, exists, rtnl_held);
603 if (err)
604 goto metadata_parse_err;
605 } else {
606 /* if no metadata allow list was passed, or allow-all was
607 * requested, then add as many supported metadata types as
608 * we can. At least one must be installed, otherwise we
609 * bail out.
610 */
611 err = use_all_metadata(ife, exists);
612 if (err)
613 goto metadata_parse_err;
614 }
615
616 if (exists)
617 spin_lock_bh(&ife->tcf_lock);
618 /* protected by tcf_lock when modifying existing action */
619 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
620 rcu_swap_protected(ife->params, p, 1);
621
622 if (exists)
623 spin_unlock_bh(&ife->tcf_lock);
624 if (goto_ch)
625 tcf_chain_put_by_act(goto_ch);
626 if (p)
627 kfree_rcu(p, rcu);
628
629 return ret;
630 metadata_parse_err:
631 if (goto_ch)
632 tcf_chain_put_by_act(goto_ch);
633 release_idr:
634 kfree(p);
635 tcf_idr_release(*a, bind);
636 return err;
637 }
638
639 static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
640 int ref)
641 {
642 unsigned char *b = skb_tail_pointer(skb);
643 struct tcf_ife_info *ife = to_ife(a);
644 struct tcf_ife_params *p;
645 struct tc_ife opt = {
646 .index = ife->tcf_index,
647 .refcnt = refcount_read(&ife->tcf_refcnt) - ref,
648 .bindcnt = atomic_read(&ife->tcf_bindcnt) - bind,
649 };
650 struct tcf_t t;
651
652 spin_lock_bh(&ife->tcf_lock);
653 opt.action = ife->tcf_action;
654 p = rcu_dereference_protected(ife->params,
655 lockdep_is_held(&ife->tcf_lock));
656 opt.flags = p->flags;
657
658 if (nla_put(skb, TCA_IFE_PARMS, sizeof(opt), &opt))
659 goto nla_put_failure;
660
661 tcf_tm_dump(&t, &ife->tcf_tm);
662 if (nla_put_64bit(skb, TCA_IFE_TM, sizeof(t), &t, TCA_IFE_PAD))
663 goto nla_put_failure;
664
665 if (!is_zero_ether_addr(p->eth_dst)) {
666 if (nla_put(skb, TCA_IFE_DMAC, ETH_ALEN, p->eth_dst))
667 goto nla_put_failure;
668 }
669
670 if (!is_zero_ether_addr(p->eth_src)) {
671 if (nla_put(skb, TCA_IFE_SMAC, ETH_ALEN, p->eth_src))
672 goto nla_put_failure;
673 }
674
675 if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
676 goto nla_put_failure;
677
678 if (dump_metalist(skb, ife)) {
679 /* ignore failure to dump metalist */
680 pr_info("Failed to dump metalist\n");
681 }
682
683 spin_unlock_bh(&ife->tcf_lock);
684 return skb->len;
685
686 nla_put_failure:
687 spin_unlock_bh(&ife->tcf_lock);
688 nlmsg_trim(skb, b);
689 return -1;
690 }
691
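/* Match a received metaid against the installed metalist and decode the
 * value into the skb; returns -ENOENT if the id is not installed on this
 * action.
 */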
692 static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
693 u16 metaid, u16 mlen, void *mdata)
694 {
695 struct tcf_meta_info *e;
696
697 /* XXX: use hash to speed up */
698 list_for_each_entry(e, &ife->metalist, metalist) {
699 if (metaid == e->metaid) {
700 if (e->ops) {
701 /* We check for decode presence already */
702 return e->ops->decode(skb, mdata, mlen);
703 }
704 }
705 }
706
707 return -ENOENT;
708 }
709
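/* Receive path: strip the IFE header, decode every known metadatum TLV
 * into the skb and re-run eth_type_trans() on the inner frame.
 */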
710 static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
711 struct tcf_result *res)
712 {
713 struct tcf_ife_info *ife = to_ife(a);
714 int action = ife->tcf_action;
715 u8 *ifehdr_end;
716 u8 *tlv_data;
717 u16 metalen;
718
719 bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
720 tcf_lastuse_update(&ife->tcf_tm);
721
722 if (skb_at_tc_ingress(skb))
723 skb_push(skb, skb->dev->hard_header_len);
724
725 tlv_data = ife_decode(skb, &metalen);
726 if (unlikely(!tlv_data)) {
727 qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
728 return TC_ACT_SHOT;
729 }
730
731 ifehdr_end = tlv_data + metalen;
732 for (; tlv_data < ifehdr_end; tlv_data = ife_tlv_meta_next(tlv_data)) {
733 u8 *curr_data;
734 u16 mtype;
735 u16 dlen;
736
737 curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype,
738 &dlen, NULL);
739 if (!curr_data) {
740 qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
741 return TC_ACT_SHOT;
742 }
743
744 if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
745 /* abuse overlimits to count when we receive metadata
746 * but don't have ops for it
747 */
748 pr_info_ratelimited("Unknown metaid %d dlen %d\n",
749 mtype, dlen);
750 qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
751 }
752 }
753
754 if (WARN_ON(tlv_data != ifehdr_end)) {
755 qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
756 return TC_ACT_SHOT;
757 }
758
759 skb->protocol = eth_type_trans(skb, skb->dev);
760 skb_reset_network_header(skb);
761
762 return action;
763 }
764
765 /* XXX: check if we can do this at install time instead of on the
766 * current send data path
767 */
768 static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
769 {
770 struct tcf_meta_info *e, *n;
771 int tot_run_sz = 0, run_sz = 0;
772
773 list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
774 if (e->ops->check_presence) {
775 run_sz = e->ops->check_presence(skb, e);
776 tot_run_sz += run_sz;
777 }
778 }
779
780 return tot_run_sz;
781 }
782
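/* Transmit path: compute the room needed for the metadata TLVs, push the
 * IFE header, encode each present metadatum and rewrite the outer
 * ethernet header.
 */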
783 static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
784 struct tcf_result *res, struct tcf_ife_params *p)
785 {
786 struct tcf_ife_info *ife = to_ife(a);
787 int action = ife->tcf_action;
788 struct ethhdr *oethh; /* outer ether header */
789 struct tcf_meta_info *e;
790 /* Frame layout after encode:
791 *   OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
792 * where ORIGDATA = original ethernet header ...
793 */
794 u16 metalen = ife_get_sz(skb, ife);
795 int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
796 unsigned int skboff = 0;
797 int new_len = skb->len + hdrm;
798 bool exceed_mtu = false;
799 void *ife_meta;
800 int err = 0;
801
802 if (!skb_at_tc_ingress(skb)) {
803 if (new_len > skb->dev->mtu)
804 exceed_mtu = true;
805 }
806
807 bstats_cpu_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
808 tcf_lastuse_update(&ife->tcf_tm);
809
810 if (!metalen) { /* no metadata to send */
811 /* abuse overlimits to count when we allow a packet
812 * with no metadata
813 */
814 qstats_overlimit_inc(this_cpu_ptr(ife->common.cpu_qstats));
815 return action;
816 }
817 /* could be a broken policy setup or mtu config,
818 * so let's be conservative.. */
819 if ((action == TC_ACT_SHOT) || exceed_mtu) {
820 qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
821 return TC_ACT_SHOT;
822 }
823
824 if (skb_at_tc_ingress(skb))
825 skb_push(skb, skb->dev->hard_header_len);
826
827 ife_meta = ife_encode(skb, metalen);
828
829 spin_lock(&ife->tcf_lock);
830
831 /* XXX: we don't have a clever way of telling encode to
832 * not repeat some of the computations that are done by
833 * ops->check_presence...
834 */
835 list_for_each_entry(e, &ife->metalist, metalist) {
836 if (e->ops->encode) {
837 err = e->ops->encode(skb, (void *)(ife_meta + skboff),
838 e);
839 }
840 if (err < 0) {
841 /* too corrupt to keep around if overwritten */
842 spin_unlock(&ife->tcf_lock);
843 qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats));
844 return TC_ACT_SHOT;
845 }
846 skboff += err;
847 }
848 spin_unlock(&ife->tcf_lock);
849 oethh = (struct ethhdr *)skb->data;
850
851 if (!is_zero_ether_addr(p->eth_src))
852 ether_addr_copy(oethh->h_source, p->eth_src);
853 if (!is_zero_ether_addr(p->eth_dst))
854 ether_addr_copy(oethh->h_dest, p->eth_dst);
855 oethh->h_proto = htons(p->eth_type);
856
857 if (skb_at_tc_ingress(skb))
858 skb_pull(skb, skb->dev->hard_header_len);
859
860 return action;
861 }
862
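/* Action entry point: dispatch to the encode or decode path based on the
 * configured flags.
 */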
863 static int tcf_ife_act(struct sk_buff *skb, const struct tc_action *a,
864 struct tcf_result *res)
865 {
866 struct tcf_ife_info *ife = to_ife(a);
867 struct tcf_ife_params *p;
868 int ret;
869
870 p = rcu_dereference_bh(ife->params);
871 if (p->flags & IFE_ENCODE) {
872 ret = tcf_ife_encode(skb, a, res, p);
873 return ret;
874 }
875
876 return tcf_ife_decode(skb, a, res);
877 }
878
879 static int tcf_ife_walker(struct net *net, struct sk_buff *skb,
880 struct netlink_callback *cb, int type,
881 const struct tc_action_ops *ops,
882 struct netlink_ext_ack *extack)
883 {
884 struct tc_action_net *tn = net_generic(net, ife_net_id);
885
886 return tcf_generic_walker(tn, skb, cb, type, ops, extack);
887 }
888
889 static int tcf_ife_search(struct net *net, struct tc_action **a, u32 index)
890 {
891 struct tc_action_net *tn = net_generic(net, ife_net_id);
892
893 return tcf_idr_search(tn, a, index);
894 }
895
896 static struct tc_action_ops act_ife_ops = {
897 .kind = "ife",
898 .id = TCA_ID_IFE,
899 .owner = THIS_MODULE,
900 .act = tcf_ife_act,
901 .dump = tcf_ife_dump,
902 .cleanup = tcf_ife_cleanup,
903 .init = tcf_ife_init,
904 .walk = tcf_ife_walker,
905 .lookup = tcf_ife_search,
906 .size = sizeof(struct tcf_ife_info),
907 };
908
909 static __net_init int ife_init_net(struct net *net)
910 {
911 struct tc_action_net *tn = net_generic(net, ife_net_id);
912
913 return tc_action_net_init(net, tn, &act_ife_ops);
914 }
915
916 static void __net_exit ife_exit_net(struct list_head *net_list)
917 {
918 tc_action_net_exit(net_list, ife_net_id);
919 }
920
921 static struct pernet_operations ife_net_ops = {
922 .init = ife_init_net,
923 .exit_batch = ife_exit_net,
924 .id = &ife_net_id,
925 .size = sizeof(struct tc_action_net),
926 };
927
928 static int __init ife_init_module(void)
929 {
930 return tcf_register_action(&act_ife_ops, &ife_net_ops);
931 }
932
933 static void __exit ife_cleanup_module(void)
934 {
935 tcf_unregister_action(&act_ife_ops, &ife_net_ops);
936 }
937
938 module_init(ife_init_module);
939 module_exit(ife_cleanup_module);
940
941 MODULE_AUTHOR("Jamal Hadi Salim(2015)");
942 MODULE_DESCRIPTION("Inter-FE LFB action");
943 MODULE_LICENSE("GPL");